repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses: 1 value)
---|---|---|---|---|---|---
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_classifier_pt.py |
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
n = 100
class MyModel(nn.Module):
def __init__(self, n_features: int, softmax: bool = False):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
self.softmax = softmax
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
x = self.dense2(x)
if self.softmax:
            x = nn.Softmax(dim=-1)(x)
return x
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
if isinstance(x, list):
return torch.from_numpy(np.array(x))
else:
return x
p_val = [.05]
n_features = [4]
preds_type = ['probs', 'logits']
binarize_preds = [True, False]
n_folds = [None, 2]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_clfdrift = list(product(p_val, n_features, preds_type, binarize_preds, n_folds,
train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
p_val, n_features, preds_type, binarize_preds, n_folds, \
train_size, preprocess_batch, update_x_ref = clfdrift_params
np.random.seed(0)
torch.manual_seed(0)
model = MyModel(n_features, softmax=(preds_type == 'probs'))
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = ClassifierDriftTorch(
x_ref=x_ref,
model=model,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
n_folds=n_folds,
preds_type=preds_type,
binarize_preds=binarize_preds,
preprocess_batch_fn=preprocess_batch,
batch_size=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == preds_type
    assert cd.meta['params']['binarize_preds'] == binarize_preds
| 2,921 | 28.515152 | 85 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/conftest.py |
import pytest
@pytest.fixture
def seed(pytestconfig):
"""
Returns the random seed set by pytest-randomly.
"""
return pytestconfig.getoption("randomly_seed")
| 175 | 16.6 | 51 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_spot_the_diff_pt.py |
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
n = 100
class MyKernel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense = nn.Linear(n_features, 20)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return torch.einsum('ji,ki->jk', self.dense(x), self.dense(y))
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
if isinstance(x, list):
return torch.from_numpy(np.array(x))
else:
return x
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
kernel = [None, MyKernel]
n_diffs = [1, 5]
tests_stddrift = list(product(p_val, n_features, train_size, preprocess_batch, kernel, n_diffs))
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
p_val, n_features, train_size, preprocess_batch, kernel, n_diffs = stddrift_params
np.random.seed(0)
torch.manual_seed(0)
if kernel is not None:
kernel = kernel(n_features)
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
cd = SpotTheDiffDriftTorch(
x_ref=x_ref,
kernel=kernel,
p_val=p_val,
n_diffs=n_diffs,
train_size=train_size,
preprocess_batch_fn=preprocess_batch,
batch_size=3,
epochs=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd._detector.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['diffs'].shape == (n_diffs, n_features)
assert preds_0['data']['diff_coeffs'].shape == (n_diffs,)
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['n_diffs'] == n_diffs
| 2,440 | 27.057471 | 96 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_lsdd_online_pt.py |
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
if len(x) > 1: # test List[Any] reference data inputs to the detector with Any=np.ndarray
return np.concatenate(x, axis=0)
else: # test Any inputs to the prediction function of the detector with Any=List[np.ndarray]
return np.array(x)[0]
n_features = [10]
ert = [25]
window_size = [5]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
n_bootstraps = [200]
tests_lsdddriftonline = list(product(n_features, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdd_online_params(request):
return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdd_online_params', list(range(n_tests)), indirect=True)
def test_lsdd_online(lsdd_online_params, seed):
n_features, ert, window_size, preprocess, n_bootstraps = lsdd_online_params
with fixed_seed(seed):
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
with fixed_seed(seed):
cd = LSDDDriftOnlineTorch(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps
)
x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
detection_times_h0 = []
test_stats_h0 = []
for x_t in x_h0:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h0.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h0 = np.array(detection_times_h0).mean()
test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
assert ert/3 < average_delay_h0 < 3*ert
cd.reset_state()
detection_times_h1 = []
test_stats_h1 = []
for x_t in x_h1:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h1.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h1 = np.array(detection_times_h1).mean()
test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
assert np.abs(average_delay_h1) < ert/2
assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_lsdd_online_state_online(tmp_path, seed):
"""
Test save/load/reset state methods for LSDDDriftOnlineTorch. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
n = 100
with fixed_seed(seed):
x_ref = np.random.normal(0, 1, (n, n_classes))
x = np.random.normal(0.1, 1, (n, n_classes))
dd = LSDDDriftOnlineTorch(x_ref, window_size=10, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
assert new_pred['data']['test_stat'] == test_stats_1[5]
| 5,861 | 34.96319 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_preprocess_pt.py |
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import HiddenOutput
n, dim1, dim2, n_classes, latent_dim, n_hidden = 100, 2, 3, 5, 2, 7
n_features = dim1 * dim2
shape = (n, dim1, dim2)
X = np.random.rand(n * n_features).reshape(shape).astype('float32')
class Model1(nn.Module):
def __init__(self):
super(Model1, self).__init__()
self.dense1 = nn.Linear(dim2, n_hidden)
self.dense2 = nn.Linear(n_hidden, n_classes)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.dense1(x)
return self.dense2(x)
model2 = nn.Sequential(
nn.Linear(dim2, n_hidden),
nn.Linear(n_hidden, n_classes)
)
model = [1, 2]
layer = [0, 1, 2]
flatten = [True, False]
tests_hidden_output = list(product(model, layer, flatten))
n_tests_hidden_output = len(tests_hidden_output)
@pytest.fixture
def hidden_output_params(request):
return tests_hidden_output[request.param]
@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
model, layer, flatten = hidden_output_params
model = Model1() if model == 1 else model2
X_hidden = HiddenOutput(model=model, layer=layer, flatten=flatten)(torch.from_numpy(X))
if layer == 0:
assert_shape = (n, dim1, dim2)
elif layer == 1:
assert_shape = (n, dim1, n_hidden)
elif layer == 2:
assert_shape = (n, dim1, n_classes)
if flatten:
assert_shape = (assert_shape[0],) + (np.prod(assert_shape[1:]),)
assert X_hidden.shape == assert_shape
| 1,651 | 28.5 | 99 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_contextmmd_pt.py |
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n = 250
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
update_ref = [{'last': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_context_mmddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_ref, preprocess_at_init))
n_tests = len(tests_context_mmddrift)
@pytest.fixture
def context_mmd_params(request):
return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmd_params', list(range(n_tests)), indirect=True)
def test_context_mmd(context_mmd_params):
n_features, n_enc, preprocess, n_permutations, update_ref, preprocess_at_init = context_mmd_params
np.random.seed(0)
torch.manual_seed(0)
c_ref = np.random.randn(*(n, 1)).astype(np.float32)
x_ref = c_ref + np.random.randn(*(n, n_features)).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
cd = ContextMMDDriftTorch(
x_ref=x_ref,
c_ref=c_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
c = c_ref.copy()
x = x_ref.copy()
preds = cd.predict(x, c, return_p_val=True, return_distance=False, return_coupling=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
assert preds['data']['distance'] is None
assert isinstance(preds['data']['coupling_xy'], np.ndarray)
if isinstance(update_ref, dict):
k = list(update_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_ref[k], len(x) + len(x_ref))
assert cd.c_ref.shape[0] == min(update_ref[k], len(x) + len(c_ref))
c_h1 = np.random.randn(*(n, 1)).astype(np.float32)
x_h1 = c_h1 + np.random.randn(*(n, n_features)).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, c_h1, return_p_val=True, return_distance=True, return_coupling=False)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
assert 'coupling_xy' not in preds['data']
| 4,034 | 36.71028 | 102 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_lsdd_pt.py |
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
update_x_ref = [None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_lsdddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_lsdddrift)
@pytest.fixture
def lsdd_params(request):
return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdd_params', list(range(n_tests)), indirect=True)
def test_lsdd(lsdd_params):
n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = lsdd_params
np.random.seed(0)
torch.manual_seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        if not preprocess_at_init:
            pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
cd = LSDDDriftTorch(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
perturbation = np.random.normal(size=(n, n_features)) / 100 # LSDD struggles with copies/repeats
x = x_ref.copy() + perturbation.astype(np.float32)
preds = cd.predict(x, return_p_val=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
if isinstance(update_x_ref, dict):
k = list(update_x_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, return_p_val=True)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 3,599 | 34.643564 | 101 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/pytorch/tests/test_mmd_online_pt.py |
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
if len(x) > 1: # test List[Any] reference data inputs to the detector with Any=np.ndarray
return np.concatenate(x, axis=0)
else: # test Any inputs to the prediction function of the detector with Any=List[np.ndarray]
return np.array(x)[0]
n_features = [10]
ert = [25]
window_size = [5]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
n_bootstraps = [200]
tests_mmddriftonline = list(product(n_features, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_mmddriftonline)
@pytest.fixture
def mmd_online_params(request):
return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmd_online_params', list(range(n_tests)), indirect=True)
def test_mmd_online(mmd_online_params, seed):
n_features, ert, window_size, preprocess, n_bootstraps = mmd_online_params
with fixed_seed(seed):
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
with fixed_seed(seed):
cd = MMDDriftOnlineTorch(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps
)
x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
detection_times_h0 = []
test_stats_h0 = []
for x_t in x_h0:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h0.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h0 = np.array(detection_times_h0).mean()
test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
assert ert/3 < average_delay_h0 < 3*ert
cd.reset_state()
detection_times_h1 = []
test_stats_h1 = []
for x_t in x_h1:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h1.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h1 = np.array(detection_times_h1).mean()
test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
assert np.abs(average_delay_h1) < ert/2
assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_mmd_online_state_online(tmp_path, seed):
"""
Test save/load/reset state methods for MMDDriftOnlineTorch. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
n = 100
with fixed_seed(seed):
x_ref = np.random.normal(0, 1, (n, n_classes))
x = np.random.normal(0.1, 1, (n, n_classes))
dd = MMDDriftOnlineTorch(x_ref, window_size=10, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
assert new_pred['data']['test_stat'] == test_stats_1[5]
| 5,847 | 34.877301 | 119 | py |
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/learned_kernel.py |
from functools import partial
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow.misc import clone_model
from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import OptimizerTF
class LearnedKernelDriftTF(BaseLearnedKernelDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
kernel: tf.keras.Model,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
n_permutations: int = 100,
var_reg: float = 1e-5,
reg_loss_fn: Callable = (lambda kernel: 0),
train_size: Optional[float] = .75,
retrain_from_scratch: bool = True,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
batch_size_predict: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
dataset: Callable = TFDataset,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
estimate of the test power. The kernel is trained on a split of the reference and test instances
and then the MMD is evaluated on held out instances and a permutation test is performed.
For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
(https://arxiv.org/abs/2002.09116)
Parameters
----------
x_ref
Data used as reference distribution.
kernel
Trainable TensorFlow model that returns a similarity between two instances.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before applying the kernel.
n_permutations
The number of permutations to use in the permutation test once the MMD has been computed.
var_reg
Constant added to the estimated variance of the MMD for stability.
reg_loss_fn
The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
The drift is detected on `1 - train_size`.
retrain_from_scratch
Whether the kernel should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
optimizer
Optimizer used during training of the kernel.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the kernel.
batch_size_predict
Batch size used for the trained drift detector predictions.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the kernel.
epochs
            Number of training epochs for the kernel. An epoch corresponds to one pass over the smaller
            of the reference and test training splits.
verbose
Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when training the kernel.
dataset
Dataset object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations,
train_size=train_size,
retrain_from_scratch=retrain_from_scratch,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.TENSORFLOW.value})
# define and compile kernel
self.original_kernel = kernel
self.kernel = clone_model(kernel)
self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
self.kernel_mat_fn = partial(
batch_compute_kernel_matrix, preprocess_fn=preprocess_batch_fn, batch_size=batch_size_predict
)
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'learning_rate': learning_rate,
'reg_loss_fn': reg_loss_fn, 'preprocess_fn': preprocess_batch_fn, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
self.j_hat = LearnedKernelDriftTF.JHat(self.kernel, var_reg)
class JHat(tf.keras.Model):
"""
A module that wraps around the kernel. When passed a batch of reference and batch of test
instances it returns an estimate of a correlate of test power.
Equation 4 of https://arxiv.org/abs/2002.09116
"""
def __init__(self, kernel: tf.keras.Model, var_reg: float):
super().__init__()
self.config = {'kernel': kernel, 'var_reg': var_reg}
self.kernel = kernel
self.var_reg = var_reg
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
k_xx, k_yy, k_xy = self.kernel(x, x), self.kernel(y, y), self.kernel(x, y)
h_mat = k_xx + k_yy - k_xy - tf.transpose(k_xy)
n = len(x)
mmd2_est = (tf.reduce_sum(h_mat)-tf.linalg.trace(h_mat))/(n*(n-1))
var_est = (4*tf.reduce_sum(tf.reduce_sum(h_mat, axis=-1)**2)/(n**3) -
4*tf.reduce_sum(h_mat)**2/(n**4))
reg_var_est = var_est + self.var_reg
return mmd2_est/tf.math.sqrt(reg_var_est)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested. The kernel
used within the MMD is first trained to maximise an estimate of the resulting test power.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x_cur = self.preprocess(x)
(x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
ds_ref_tr, ds_cur_tr = self.dataset(x_ref_tr), self.dataset(x_cur_tr)
self.kernel = clone_model(self.original_kernel) if self.retrain_from_scratch else self.kernel
train_args = [self.j_hat, (ds_ref_tr, ds_cur_tr)]
LearnedKernelDriftTF.trainer(*train_args, **self.train_kwargs)
if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
else:
x_all = x_ref_te + x_cur_te
kernel_mat = self.kernel_mat_fn(x_all, x_all, self.kernel)
kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat)) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=False, zero_diag=False).numpy()
mmd2_permuted = np.array(
[mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=True, zero_diag=False).numpy()
for _ in range(self.n_permutations)]
)
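        # p-value: the fraction of permuted MMD^2 values at least as large as the observed MMD^2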
p_val = (mmd2 <= mmd2_permuted).mean()
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = np.sort(mmd2_permuted)[::-1][idx_threshold]
return p_val, mmd2, distance_threshold
@staticmethod
def trainer(
j_hat: JHat,
datasets: Tuple[tf.keras.utils.Sequence, tf.keras.utils.Sequence],
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
preprocess_fn: Callable = None,
epochs: int = 20,
reg_loss_fn: Callable = (lambda kernel: 0),
verbose: int = 1,
) -> None:
"""
Train the kernel to maximise an estimate of test power using minibatch gradient descent.
"""
ds_ref, ds_cur = datasets
optimizer = optimizer(learning_rate=learning_rate) if isinstance(optimizer, type) else optimizer
n_minibatch = min(len(ds_ref), len(ds_cur))
# iterate over epochs
loss_ma = 0.
for epoch in range(epochs):
if verbose:
pbar = tf.keras.utils.Progbar(n_minibatch, 1)
for step, (x_ref, x_cur) in enumerate(zip(ds_ref, ds_cur)):
if isinstance(preprocess_fn, Callable): # type: ignore
x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
with tf.GradientTape() as tape:
estimate = j_hat(x_ref, x_cur)
loss = -estimate + reg_loss_fn(j_hat.kernel) # ascent
grads = tape.gradient(loss, j_hat.trainable_weights)
optimizer.apply_gradients(zip(grads, j_hat.trainable_weights))
if verbose == 1:
loss_ma = loss_ma + (loss - loss_ma) / (step + 1)
pbar_values = [('loss', loss_ma)]
pbar.add(1, values=pbar_values)
| 11,206 | 46.28692 | 115 | py |
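A minimal usage sketch for the detector above. It assumes the `DeepKernel` utility exported by `alibi_detect.utils.tensorflow`; data, shapes and hyperparameters are illustrative only:
import numpy as np
import tensorflow as tf
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
from alibi_detect.utils.tensorflow import DeepKernel  # assumed public kernel utility

x_ref = np.random.randn(100, 4).astype(np.float32)  # reference sample
proj = tf.keras.Sequential([tf.keras.layers.Input(shape=(4,)), tf.keras.layers.Dense(8)])
kernel = DeepKernel(proj, eps=0.01)  # trainable deep kernel wrapping the projection
cd = LearnedKernelDriftTF(x_ref, kernel, p_val=.05, epochs=2)
preds = cd.predict(np.random.randn(100, 4).astype(np.float32))
print(preds['data']['is_drift'], preds['data']['p_val'])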
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/mmd.py |
import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.tensorflow.distance import mmd2_from_kernel_matrix
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class MMDDriftTF(BaseMMDDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
configure_kernel_from_x_ref=configure_kernel_from_x_ref,
n_permutations=n_permutations,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.TENSORFLOW.value})
# initialize kernel
if isinstance(sigma, np.ndarray):
sigma = tf.convert_to_tensor(sigma)
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
if self.infer_sigma or isinstance(sigma, tf.Tensor):
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=self.infer_sigma)
self.infer_sigma = False
else:
self.k_xx, self.infer_sigma = None, True
def kernel_matrix(self, x: Union[np.ndarray, tf.Tensor], y: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
""" Compute and return full kernel matrix between arrays x and y. """
k_xy = self.kernel(x, y, self.infer_sigma)
k_xx = self.k_xx if self.k_xx is not None and self.update_x_ref is None else self.kernel(x, x)
k_yy = self.kernel(y, y)
kernel_mat = tf.concat([tf.concat([k_xx, k_xy], 1), tf.concat([tf.transpose(k_xy, (1, 0)), k_yy], 1)], 0)
return kernel_mat
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
# compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix
n = x.shape[0]
kernel_mat = self.kernel_matrix(x_ref, x)
kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat)) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, n, permute=False, zero_diag=False).numpy()
mmd2_permuted = np.array(
[mmd2_from_kernel_matrix(kernel_mat, n, permute=True, zero_diag=False).numpy()
for _ in range(self.n_permutations)]
)
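        # p-value: the fraction of permuted MMD^2 values at least as large as the observed MMD^2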
p_val = (mmd2 <= mmd2_permuted).mean()
# compute distance threshold
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = np.sort(mmd2_permuted)[::-1][idx_threshold]
return p_val, mmd2, distance_threshold
| 6,034 | 44.719697 | 115 | py |
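A minimal usage sketch for the `MMDDriftTF` detector defined above, using its default Gaussian RBF kernel; the data and shapes are illustrative only:
import numpy as np
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF

x_ref = np.random.randn(200, 10).astype(np.float32)
cd = MMDDriftTF(x_ref, p_val=.05, n_permutations=100)
x_test = np.random.randn(200, 10).astype(np.float32) + 1.  # mean-shifted, so drift is expected
preds = cd.predict(x_test)
print(preds['data']['is_drift'], preds['data']['p_val'])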
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/utils.py |
from typing import Callable
from functools import partial
def activate_train_mode_for_all_layers(model: Callable) -> Callable:
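    # Freeze the weights (no gradient updates) but call every layer in training mode,
    # so stochastic layers such as dropout stay active at prediction time.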
model.trainable = False # type: ignore
model = partial(model, training=True) # Note this affects batchnorm etc also
return model
| 272 | 29.333333 | 81 | py |
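A short sketch of the helper's effect; the toy model below is hypothetical:
import tensorflow as tf
from alibi_detect.cd.tensorflow.utils import activate_train_mode_for_all_layers

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(3,)),
    tf.keras.layers.Dense(8),
    tf.keras.layers.Dropout(.5),
    tf.keras.layers.Dense(2)
])
stochastic_model = activate_train_mode_for_all_layers(model)
x = tf.random.normal((4, 3))
# dropout remains active, so two calls on the same input differ
print(stochastic_model(x) - stochastic_model(x))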
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/classifier.py |
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
from scipy.special import softmax
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow.misc import clone_model
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import OptimizerTF
class ClassifierDriftTF(BaseClassifierDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: np.ndarray,
model: tf.keras.Model,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
reg_loss_fn: Callable = (lambda model: 0),
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
dataset: Callable = TFDataset,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
TensorFlow classification model used for drift detection.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' or 'logits'.
binarize_preds
            Whether to test for discrepancy on soft (e.g. prob/log-prob) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
reg_loss_fn
The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
            Optional number of stratified folds used for training. The model predictions are then calculated
            on all the out-of-fold instances. This makes it possible to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier.
0 is silent, 1 a progress bar and 2 prints the statistics after each epoch.
train_kwargs
Optional additional kwargs when fitting the classifier.
dataset
Dataset object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
preds_type=preds_type,
binarize_preds=binarize_preds,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
input_shape=input_shape,
data_type=data_type
)
if preds_type not in ['probs', 'logits']:
raise ValueError("'preds_type' should be 'probs' or 'logits'")
self.meta.update({'backend': Framework.TENSORFLOW.value})
# define and compile classifier model
self.original_model = model
self.model = clone_model(model)
self.loss_fn = BinaryCrossentropy(from_logits=(self.preds_type == 'logits'))
self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
self.predict_fn = partial(predict_batch, preprocess_fn=preprocess_batch_fn, batch_size=batch_size)
optimizer = optimizer(learning_rate=learning_rate) if isinstance(optimizer, type) else optimizer
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs,
'reg_loss_fn': reg_loss_fn, 'preprocess_fn': preprocess_batch_fn, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray, # type: ignore[override]
Union[np.ndarray, list], Union[np.ndarray, list]]:
"""
Compute the out-of-fold drift metric such as the accuracy from a classifier
trained to distinguish the reference data from the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value, a notion of distance between the trained classifier's out-of-fold performance \
and that which we'd expect under the null assumption of no drift, \
and the out-of-fold classifier model prediction probabilities on the reference and test data \
as well as the associated reference and test instances of the out-of-fold predictions.
"""
x_ref, x = self.preprocess(x) # type: ignore[assignment]
x, y, splits = self.get_splits(x_ref, x) # type: ignore
# iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
preds_oof_list, idx_oof_list = [], []
for idx_tr, idx_te in splits:
y_tr = np.eye(2)[y[idx_tr]]
if isinstance(x, np.ndarray):
x_tr, x_te = x[idx_tr], x[idx_te]
elif isinstance(x, list):
x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
ds_tr = self.dataset(x_tr, y_tr)
if self.retrain_from_scratch:
# clone model to re-initialise
self.model = clone_model(self.original_model)
# Clone optimizer to prevent error due to cloned model (with new tf>=2.11 optimizers)
optimizer = self.train_kwargs['optimizer']
self.train_kwargs['optimizer'] = optimizer.__class__.from_config(optimizer.get_config())
train_args = [self.model, self.loss_fn, None]
self.train_kwargs.update({'dataset': ds_tr})
trainer(*train_args, **self.train_kwargs)
preds = self.predict_fn(x_te, self.model)
preds_oof_list.append(preds)
idx_oof_list.append(idx_te)
preds_oof = np.concatenate(preds_oof_list, axis=0)
probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
idx_oof = np.concatenate(idx_oof_list, axis=0)
y_oof = y[idx_oof]
n_cur = y_oof.sum()
n_ref = len(y_oof) - n_cur
p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
idx_sort = np.argsort(idx_oof)
probs_sort = probs_oof[idx_sort]
if isinstance(x, np.ndarray):
x_oof = x[idx_oof]
x_sort = x_oof[idx_sort]
else:
x_oof = [x[_] for _ in idx_oof]
x_sort = [x_oof[_] for _ in idx_sort]
return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]
| 10,298 | 48.514423 | 115 | py |
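A minimal usage sketch for `ClassifierDriftTF`; the two-layer classifier and the data are illustrative only:
import numpy as np
import tensorflow as tf
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF

x_ref = np.random.randn(100, 4).astype(np.float32)
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(4,)),
    tf.keras.layers.Dense(20, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')  # matches the default preds_type='probs'
])
cd = ClassifierDriftTF(x_ref, model, p_val=.05, n_folds=2, epochs=1)
preds = cd.predict(np.ones((100, 4), dtype=np.float32))
print(preds['data']['is_drift'], preds['data']['p_val'])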
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/lsdd_online.py |
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.tensorflow import GaussianRBF, quantile, permed_lsdds
from alibi_detect.utils.frameworks import Framework
class LSDDDriftOnlineTF(BaseMultiDriftOnline):
online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xtc')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        However, modifications are made such that a desired ERT can be accurately targeted.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, while larger windows focus on the
            ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ERT.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 2*window_size.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.backend = Framework.TENSORFLOW.value
self.meta.update({'backend': self.backend})
self.n_kernel_centers = n_kernel_centers
self.lambda_rd_max = lambda_rd_max
self._configure_normalization()
# initialize kernel
if sigma is None:
self.kernel = GaussianRBF()
_ = self.kernel(self.x_ref, self.x_ref, infer_sigma=True)
else:
sigma = tf.convert_to_tensor(sigma)
self.kernel = GaussianRBF(sigma)
if self.n_kernel_centers is None:
self.n_kernel_centers = 2*window_size
self._configure_kernel_centers()
self._configure_thresholds()
        self._configure_ref_subset()  # self._initialise_state() called inside here
def _configure_normalization(self, eps: float = 1e-12):
"""
Configure the normalization functions used to normalize reference and test data to zero mean and unit variance.
The reference data `x_ref` is also normalized here.
"""
x_ref_means = tf.reduce_mean(self.x_ref, axis=0)
x_ref_stds = tf.math.reduce_std(self.x_ref, axis=0)
self._normalize = lambda x: (x - x_ref_means)/(x_ref_stds + eps)
self._unnormalize = lambda x: (x * (x_ref_stds + eps) + x_ref_means).numpy()
self.x_ref = self._normalize(self.x_ref)
def _configure_kernel_centers(self):
"Set aside reference samples to act as kernel centers"
perm = tf.random.shuffle(tf.range(self.n))
self.c_inds, self.non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
self.kernel_centers = tf.gather(self.x_ref, self.c_inds)
if np.unique(self.kernel_centers.numpy(), axis=0).shape[0] < self.n_kernel_centers:
perturbation = tf.random.normal(self.kernel_centers.shape, mean=0, stddev=1e-6)
self.kernel_centers = self.kernel_centers + perturbation
self.x_ref_eff = tf.gather(self.x_ref, self.non_c_inds) # the effective reference set
self.k_xc = self.kernel(self.x_ref_eff, self.kernel_centers)
def _configure_thresholds(self):
"""
Configure the test statistic thresholds via bootstrapping.
"""
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2*w_size-1 # etw = extended test window
nkc_size = self.n - self.n_kernel_centers # nkc = non-kernel-centers
rw_size = nkc_size - etw_size # rw = ref-window
perms = [tf.random.shuffle(tf.range(nkc_size)) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:rw_size] for perm in perms]
y_inds_all = [perm[rw_size:] for perm in perms]
# For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
# Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
H = GaussianRBF(np.sqrt(2.)*self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
# Compute lsdds for first test-window. We infer regularisation constant lambda here.
y_inds_all_0 = [y_inds[:w_size] for y_inds in y_inds_all]
lsdds_0, H_lam_inv = permed_lsdds(
self.k_xc, x_inds_all, y_inds_all_0, H, lam_rd_max=self.lambda_rd_max,
)
# Can compute threshold for first window
thresholds = [quantile(lsdds_0, 1-self.fpr)]
# And now to iterate through the other W-1 overlapping windows
p_bar = tqdm(range(1, w_size), "Computing thresholds") if self.verbose else range(1, w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:(w+w_size)] for y_inds in y_inds_all]
lsdds_w, _ = permed_lsdds(self.k_xc, x_inds_all, y_inds_all_w, H, H_lam_inv=H_lam_inv)
thresholds.append(quantile(lsdds_w, 1-self.fpr))
x_inds_all = [x_inds_all[i] for i in range(len(x_inds_all)) if lsdds_w[i] < thresholds[-1]]
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if lsdds_w[i] < thresholds[-1]]
self.thresholds = thresholds
self.H_lam_inv = H_lam_inv
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
attributes defined by `_configure_ref_subset`, hence must be called afterwards.
"""
super()._initialise_state()
self.test_window = tf.gather(self.x_ref_eff, self.init_test_inds)
self.k_xtc = self.kernel(self.test_window, self.kernel_centers)
def _configure_ref_subset(self):
"""
Configure the reference data split. If the randomly selected split causes an initial detection, further splits
are attempted.
"""
etw_size = 2*self.window_size-1 # etw = extended test window
nkc_size = self.n - self.n_kernel_centers # nkc = non-kernel-centers
rw_size = nkc_size - etw_size # rw = ref-window
# Make split and ensure it doesn't cause an initial detection
lsdd_init = None
while lsdd_init is None or lsdd_init >= self.get_threshold(0):
# Make split
perm = tf.random.shuffle(tf.range(nkc_size))
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
self.c2s = tf.reduce_mean(tf.gather(self.k_xc, self.ref_inds), axis=0) # (below Eqn 21)
# Compute initial lsdd to check for initial detection
self._initialise_state() # to set self.test_window and self.k_xtc
h_init = self.c2s - tf.reduce_mean(self.k_xtc, axis=0) # (Eqn 21)
lsdd_init = h_init[None, :] @ self.H_lam_inv @ h_init[:, None] # (Eqn 11)
def _update_state(self, x_t: tf.Tensor):
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
k_xtc = self.kernel(x_t, self.kernel_centers)
self.test_window = tf.concat([self.test_window[(1-self.window_size):], x_t], axis=0)
self.k_xtc = tf.concat([self.k_xtc[(1-self.window_size):], k_xtc], axis=0)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (LSDD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
LSDD estimate between reference window and test window.
"""
x_t = super()._preprocess_xt(x_t)
x_t = tf.convert_to_tensor(x_t)
x_t = self._normalize(x_t)
self._update_state(x_t)
h = self.c2s - tf.reduce_mean(self.k_xtc, axis=0) # (Eqn 21)
lsdd = h[None, :] @ self.H_lam_inv @ h[:, None] # (Eqn 11)
return float(lsdd)
| 10,963 | 46.258621 | 119 | py |
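A minimal usage sketch for the online detector above; the `ert`, `window_size` and stream values are illustrative only:
import numpy as np
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF

x_ref = np.random.randn(400, 10).astype(np.float32)
cd = LSDDDriftOnlineTF(x_ref, ert=50, window_size=10, n_bootstraps=1000)
stream = np.random.randn(50, 10).astype(np.float32) + 1.  # mean-shifted stream
for x_t in stream:  # feed one instance at a time
    pred = cd.predict(x_t)
    if pred['data']['is_drift']:
        print('drift detected at t =', pred['data']['time'])
        cd.reset_state()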
alibi-detect | alibi-detect-master/alibi_detect/cd/tensorflow/spot_the_diff.py |
import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Union
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
from alibi_detect.utils.tensorflow import GaussianRBF
from alibi_detect.utils.tensorflow.prediction import predict_batch
logger = logging.getLogger(__name__)
class SpotTheDiffDriftTF:
def __init__(
self,
x_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_fn: Optional[Callable] = None,
kernel: Optional[tf.keras.Model] = None,
n_diffs: int = 1,
initial_diffs: Optional[np.ndarray] = None,
l1_reg: float = 0.01,
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
dataset: Callable = TFDataset,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occurred, the test locations
learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
The test locations are regularised to be close to the average reference instance such that the **difference**
is then interpretable as the transformation required for each feature to make the average instance more/less
like a test instance than a reference instance.
The classifier is trained on a fraction of the combined reference and test data and drift is detected on
the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Differentiable TensorFlow model used to define similarity between instances, defaults to Gaussian RBF.
n_diffs
The number of test locations to use, each corresponding to an interpretable difference.
initial_diffs
Array used to initialise the diffs that will be learned. Defaults to Gaussian
for each feature with equal variance to that of reference data.
l1_reg
Strength of l1 regularisation to apply to the differences.
binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows all the reference and test data to be leveraged
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when fitting the classifier.
dataset
Dataset object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
if preprocess_fn is not None and preprocess_batch_fn is not None:
raise ValueError("SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both.")
if n_folds is not None and n_folds > 1:
logger.warning("When using multiple folds the returned diffs will correspond to the final fold only.")
if not x_ref_preprocessed and preprocess_fn is not None:
x_ref_proc = preprocess_fn(x_ref)
elif not x_ref_preprocessed and preprocess_batch_fn is not None:
x_ref_proc = predict_batch(
x_ref, lambda x: x, preprocess_fn=preprocess_batch_fn, batch_size=batch_size
)
else:
x_ref_proc = x_ref
if kernel is None:
kernel = GaussianRBF(trainable=True)
if initial_diffs is None:
initial_diffs = np.random.normal(size=(n_diffs,) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
else:
if len(initial_diffs) != n_diffs:
raise ValueError("Should have initial_diffs.shape[0] == n_diffs")
model = SpotTheDiffDriftTF.InterpretableClf(kernel, x_ref_proc, initial_diffs)
reg_loss_fn = (lambda model: tf.reduce_mean(tf.abs(model.diffs)) * l1_reg)
self._detector = ClassifierDriftTF(
x_ref=x_ref,
model=model,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=True,
update_x_ref=None,
preprocess_fn=preprocess_fn,
preds_type='logits',
binarize_preds=binarize_preds,
reg_loss_fn=reg_loss_fn,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
optimizer=optimizer,
learning_rate=learning_rate,
batch_size=batch_size,
preprocess_batch_fn=preprocess_batch_fn,
epochs=epochs,
verbose=verbose,
train_kwargs=train_kwargs,
dataset=dataset,
input_shape=input_shape,
data_type=data_type
)
self.meta = self._detector.meta
self.meta['params']['name'] = 'SpotTheDiffDrift'
self.meta['params']['n_diffs'] = n_diffs
self.meta['params']['l1_reg'] = l1_reg
self.meta['params']['initial_diffs'] = initial_diffs
class InterpretableClf(tf.keras.Model):
def __init__(self, kernel: tf.keras.Model, x_ref: np.ndarray, initial_diffs: np.ndarray):
super().__init__()
self.config = {'kernel': kernel, 'x_ref': x_ref, 'initial_diffs': initial_diffs}
self.kernel = kernel
self.mean = tf.convert_to_tensor(x_ref.mean(0))
self.diffs = tf.Variable(initial_diffs, dtype=np.float32)
self.bias = tf.Variable(tf.zeros((1,)))
self.coeffs = tf.Variable(tf.zeros((len(initial_diffs),)))
def call(self, x: tf.Tensor) -> tf.Tensor:
k_xtl = self.kernel(x, self.mean + self.diffs)
logits = self.bias + k_xtl @ self.coeffs[:, None]
return tf.concat([-logits, logits], axis=-1)
def get_config(self) -> dict:
return self.config
@classmethod
def from_config(cls, config):
return cls(**config)
def predict(
self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
return_probs: bool = True, return_model: bool = False
) -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data).
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the detector's metadata.
- ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
and optionally the p-value, performance of the classifier relative to its expectation under the \
no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
        data, as well as the associated reference and test instances of the out-of-fold predictions, \
and the trained model.
"""
preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
preds['data']['diffs'] = preds['data']['model'].diffs.numpy()
preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.numpy()
if not return_model:
del preds['data']['model']
return preds
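# --- Usage sketch (illustrative, not part of the library) ---
# Minimal interpretable drift check on synthetic stand-in data: the learned `diffs`
# indicate how the average reference instance would have to change to look more
# like a test instance. Data shapes and hyperparameters below are arbitrary.
if __name__ == '__main__':
    x_ref = np.random.randn(500, 10).astype(np.float32)
    x_test = (x_ref + .5).astype(np.float32)  # mean-shifted test sample
    cd = SpotTheDiffDriftTF(x_ref, n_diffs=1, epochs=5)
    preds = cd.predict(x_test)
    print(preds['data']['is_drift'], preds['data']['diffs'].shape)  # e.g. 1 (1, 10)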
| 10,788 | 46.528634 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/mmd_online.py
|
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.tensorflow import zero_diag, quantile, subset_matrix
from alibi_detect.utils.frameworks import Framework
class MMDDriftOnlineTF(BaseMultiDriftOnline):
online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xy')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
        Online Maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.backend = Framework.TENSORFLOW.value
self.meta.update({'backend': self.backend})
# initialize kernel
if isinstance(sigma, np.ndarray):
sigma = tf.convert_to_tensor(sigma)
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=(sigma is None))
self._configure_thresholds()
self._configure_ref_subset() # self.initialise_state() called inside here
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
attributes defined by `_configure_ref_subset`, hence must be called afterwards.
"""
super()._initialise_state()
self.test_window = tf.gather(self.x_ref, self.init_test_inds)
self.k_xy = self.kernel(tf.gather(self.x_ref, self.ref_inds), self.test_window)
def _configure_ref_subset(self):
"""
Configure the reference data split. If the randomly selected split causes an initial detection, further splits
are attempted.
"""
etw_size = 2 * self.window_size - 1 # etw = extended test window
        rw_size = self.n - etw_size  # rw = ref window
# Make split and ensure it doesn't cause an initial detection
mmd_init = None
while mmd_init is None or mmd_init >= self.get_threshold(0):
# Make split
perm = tf.random.shuffle(tf.range(self.n))
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
# Compute initial mmd to check for initial detection
            self._initialise_state()  # to set self.test_window and self.k_xy
self.k_xx_sub = subset_matrix(self.k_xx, self.ref_inds, self.ref_inds)
self.k_xx_sub_sum = tf.reduce_sum(zero_diag(self.k_xx_sub)) / (rw_size * (rw_size - 1))
k_yy = self.kernel(self.test_window, self.test_window)
mmd_init = (
self.k_xx_sub_sum +
tf.reduce_sum(zero_diag(k_yy)) / (self.window_size * (self.window_size - 1)) -
2 * tf.reduce_mean(self.k_xy)
)
def _configure_thresholds(self):
"""
Configure the test statistic thresholds via bootstrapping.
"""
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2 * w_size - 1 # etw = extended test window
rw_size = self.n - etw_size # rw = ref window
perms = [tf.random.shuffle(tf.range(self.n)) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:-etw_size] for perm in perms]
y_inds_all = [perm[-etw_size:] for perm in perms]
if self.verbose:
print("Generating permutations of kernel matrix..")
# Need to compute mmd for each bs for each of W overlapping windows
# Most of the computation can be done once however
# We avoid summing the rw_size^2 submatrix for each bootstrap sample by instead computing the full
        # sum once and then subtracting the relevant parts (k_xx_sum = k_full_sum - 2*k_xy_sum - k_yy_sum).
# We also reduce computation of k_xy_sum from O(nW) to O(W) by caching column sums
k_full_sum = tf.reduce_sum(zero_diag(self.k_xx))
k_xy_col_sums_all = [
tf.reduce_sum(subset_matrix(self.k_xx, x_inds, y_inds), axis=0) for x_inds, y_inds in
(tqdm(zip(x_inds_all, y_inds_all), total=self.n_bootstraps) if self.verbose else
zip(x_inds_all, y_inds_all))
]
k_xx_sums_all = [(
k_full_sum -
tf.reduce_sum(zero_diag(subset_matrix(self.k_xx, y_inds, y_inds))) -
2 * tf.reduce_sum(k_xy_col_sums)
) / (rw_size * (rw_size - 1)) for y_inds, k_xy_col_sums in zip(y_inds_all, k_xy_col_sums_all)]
k_xy_col_sums_all = [k_xy_col_sums / (rw_size * w_size) for k_xy_col_sums in k_xy_col_sums_all]
# Now to iterate through the W overlapping windows
thresholds = []
p_bar = tqdm(range(w_size), "Computing thresholds") if self.verbose else range(w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:w + w_size] for y_inds in y_inds_all] # test windows of size W
mmds = [(
k_xx_sum +
tf.reduce_sum(zero_diag(subset_matrix(self.k_xx, y_inds_w, y_inds_w))) / (w_size * (w_size - 1)) -
2 * tf.reduce_sum(k_xy_col_sums[w:w + w_size]))
for k_xx_sum, y_inds_w, k_xy_col_sums in zip(k_xx_sums_all, y_inds_all_w, k_xy_col_sums_all)
]
mmds = tf.stack(mmds, axis=0) # an mmd for each bootstrap sample
            # Now we record the threshold and discard all bootstrap samples whose mmd falls in the top fpr proportion
thresholds.append(quantile(mmds, 1 - self.fpr))
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if mmds[i] < thresholds[-1]]
k_xx_sums_all = [
k_xx_sums_all[i] for i in range(len(k_xx_sums_all)) if mmds[i] < thresholds[-1]
]
k_xy_col_sums_all = [
k_xy_col_sums_all[i] for i in range(len(k_xy_col_sums_all)) if mmds[i] < thresholds[-1]
]
self.thresholds = thresholds
def _update_state(self, x_t: np.ndarray): # type: ignore[override]
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
kernel_col = self.kernel(tf.gather(self.x_ref, self.ref_inds), x_t)
self.test_window = tf.concat([self.test_window[(1 - self.window_size):], x_t], axis=0)
self.k_xy = tf.concat([self.k_xy[:, (1 - self.window_size):], kernel_col], axis=1)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (squared MMD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
Squared MMD estimate between reference window and test window.
"""
x_t = super()._preprocess_xt(x_t)
self._update_state(x_t)
k_yy = self.kernel(self.test_window, self.test_window)
mmd = (
self.k_xx_sub_sum +
tf.reduce_sum(zero_diag(k_yy)) / (self.window_size * (self.window_size - 1)) -
2 * tf.reduce_mean(self.k_xy)
)
return mmd.numpy()
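# --- Usage sketch (illustrative, not part of the library) ---
# Minimal online MMD detection loop on synthetic stand-in data; the hyperparameters
# are arbitrary and the prediction dictionary layout mirrors the online LSDD tests.
if __name__ == '__main__':
    x_ref = np.random.randn(500, 10).astype(np.float32)
    cd = MMDDriftOnlineTF(x_ref, ert=150, window_size=10, n_bootstraps=1500)
    for x_t in np.random.randn(100, 10).astype(np.float32) + 1.:  # shifted stream
        pred = cd.predict(x_t, return_test_stat=True)
        if pred['data']['is_drift']:
            print(f"Drift detected at t={pred['data']['time']}")
            break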
| 10,418 | 46.144796 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/context_aware.py
|
import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Callable, Dict, Optional, Tuple, Union, List
from alibi_detect.cd.base import BaseContextMMDDrift
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.cd._domain_clf import _SVCDomainClf
from tqdm import tqdm
logger = logging.getLogger(__name__)
class ContextMMDDriftTF(BaseContextMMDDrift):
lams: Optional[Tuple[tf.Tensor, tf.Tensor]]
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = GaussianRBF,
c_kernel: Callable = GaussianRBF,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False,
) -> None:
"""
A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
        Only detects differences between samples that cannot be attributed to differences between associated
sets of contexts. p-values are computed using a conditional permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether or not to print progress during configuration.
"""
super().__init__(
x_ref=x_ref,
c_ref=c_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
x_kernel=x_kernel,
c_kernel=c_kernel,
n_permutations=n_permutations,
prop_c_held=prop_c_held,
n_folds=n_folds,
batch_size=batch_size,
input_shape=input_shape,
data_type=data_type,
verbose=verbose
)
self.meta.update({'backend': Framework.TENSORFLOW.value})
# initialize kernel
self.x_kernel = x_kernel(init_sigma_fn=_sigma_median_diag) if x_kernel == GaussianRBF else x_kernel
self.c_kernel = c_kernel(init_sigma_fn=_sigma_median_diag) if c_kernel == GaussianRBF else c_kernel
# Initialize classifier (hardcoded for now)
self.clf = _SVCDomainClf(self.c_kernel)
def score(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
"""
Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
p-value representing the test statistic's extremity under the null hypothesis.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
Returns
-------
p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test \
statistic threshold above which drift is flagged, and a tuple containing the coupling matrices \
(W_{ref,ref}, W_{test,test}, W_{ref,test}).
"""
x_ref, x = self.preprocess(x)
# Hold out a portion of contexts for conditioning on
n, n_held = len(c), int(len(c)*self.prop_c_held)
inds_held = np.random.choice(n, n_held, replace=False)
inds_test = np.setdiff1d(np.arange(n), inds_held)
c_held = c[inds_held]
c, x = c[inds_test], x[inds_test]
n_ref, n_test = len(x_ref), len(x)
bools = tf.concat([tf.zeros(n_ref), tf.ones(n_test)], axis=0)
# Compute kernel matrices
x_all = tf.concat([x_ref, x], axis=0)
c_all = tf.concat([self.c_ref, c], axis=0)
K = self.x_kernel(x_all, x_all)
L = self.c_kernel(c_all, c_all)
L_held = self.c_kernel(c_held, c_all)
# Fit and calibrate the domain classifier
c_all_np, bools_np = c_all.numpy(), bools.numpy()
self.clf.fit(c_all_np, bools_np)
self.clf.calibrate(c_all_np, bools_np)
# Obtain n_permutations conditional reassignments
prop_scores = self.clf.predict(c_all_np)
self.redrawn_bools = [tfp.distributions.Bernoulli(probs=prop_scores).sample()
for _ in range(self.n_permutations)]
iters = tqdm(self.redrawn_bools, total=self.n_permutations) if self.verbose else self.redrawn_bools
# Compute test stat on original and reassigned data
stat, coupling_xx, coupling_yy, coupling_xy = self._cmmd(K, L, bools, L_held=L_held)
permuted_stats = tf.stack([self._cmmd(K, L, perm_bools, L_held=L_held)[0] for perm_bools in iters])
# Compute p-value
p_val = tf.reduce_mean(tf.cast(stat <= permuted_stats, float))
coupling = (coupling_xx.numpy(), coupling_yy.numpy(), coupling_xy.numpy())
# compute distance threshold
idx_threshold = int(self.p_val * len(permuted_stats))
distance_threshold = np.sort(permuted_stats)[::-1][idx_threshold]
return p_val.numpy().item(), stat.numpy().item(), distance_threshold, coupling
def _cmmd(self, K: tf.Tensor, L: tf.Tensor, bools: tf.Tensor, L_held: tf.Tensor = None) \
-> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""
Private method to compute the MMD-ADiTT test statistic.
"""
# Get ref/test indices
idx_0, idx_1 = np.where(bools == 0)[0], np.where(bools == 1)[0]
n_ref, n_test = len(idx_0), len(idx_1)
# Form kernel matrices
L_0, L_1 = tf.gather(tf.gather(L, idx_0), idx_0, axis=1), tf.gather(tf.gather(L, idx_1), idx_1, axis=1)
K_0, K_1 = tf.gather(tf.gather(K, idx_0), idx_0, axis=1), tf.gather(tf.gather(K, idx_1), idx_1, axis=1)
        # Avoid using tf.gather_nd since this would require [n_ref, n_ref, 2] and [n_test, n_test, 2] idx tensors
# Initialise regularisation parameters
# Implemented only for first _cmmd call which corresponds to original window assignment
if self.lams is None:
possible_lams = tf.convert_to_tensor([2**(-i) for i in range(20)], dtype=tf.float64)
lam_0 = self._pick_lam(possible_lams, K_0, L_0, n_folds=self.n_folds)
lam_1 = self._pick_lam(possible_lams, K_1, L_1, n_folds=self.n_folds)
self.lams = (lam_0, lam_1)
# Compute stat
L_0_inv = tf.linalg.inv(L_0 + n_ref*self.lams[0]*tf.eye(int(n_ref)))
L_1_inv = tf.linalg.inv(L_1 + n_test*self.lams[1]*tf.eye(int(n_test)))
A_0 = tf.gather(L_held, idx_0, axis=1) @ L_0_inv
A_1 = tf.gather(L_held, idx_1, axis=1) @ L_1_inv
# Allow batches of MMDs to be computed at a time (rather than all)
if self.batch_size is not None:
bs = self.batch_size
coupling_xx = tf.reduce_mean(tf.stack([tf.reduce_mean(tf.einsum('ij,ik->ijk', A_0_i, A_0_i), axis=0)
for A_0_i in tf.split(A_0, _split_chunks(len(A_0), bs))]), axis=0)
coupling_yy = tf.reduce_mean(tf.stack([tf.reduce_mean(tf.einsum('ij,ik->ijk', A_1_i, A_1_i), axis=0)
for A_1_i in tf.split(A_1, _split_chunks(len(A_1), bs))]), axis=0)
coupling_xy = tf.reduce_mean(tf.stack([
tf.reduce_mean(tf.einsum('ij,ik->ijk', A_0_i, A_1_i), axis=0)
for A_0_i, A_1_i in zip(tf.split(A_0, _split_chunks(len(A_0), bs)),
tf.split(A_1, _split_chunks(len(A_1), bs)))
]), axis=0)
else:
coupling_xx = tf.reduce_mean(tf.einsum('ij,ik->ijk', A_0, A_0), axis=0)
coupling_yy = tf.reduce_mean(tf.einsum('ij,ik->ijk', A_1, A_1), axis=0)
coupling_xy = tf.reduce_mean(tf.einsum('ij,ik->ijk', A_0, A_1), axis=0)
sim_xx = tf.reduce_sum(tf.gather(tf.gather(K, idx_0), idx_0, axis=1)*coupling_xx)
sim_yy = tf.reduce_sum(tf.gather(tf.gather(K, idx_1), idx_1, axis=1)*coupling_yy)
sim_xy = tf.reduce_sum(tf.gather(tf.gather(K, idx_0), idx_1, axis=1)*coupling_xy)
stat = sim_xx + sim_yy - 2*sim_xy
return stat, coupling_xx, coupling_yy, coupling_xy
def _pick_lam(self, lams: tf.Tensor, K: tf.Tensor, L: tf.Tensor, n_folds: int = 5) -> tf.Tensor:
"""
The conditional mean embedding is estimated as the solution of a regularised regression problem.
        This private method uses cross-validation to select the regularisation parameter that
minimises squared error on the out-of-fold instances. The error is a distance in the RKHS and is
therefore an MMD-like quantity itself.
"""
n = len(L)
fold_size = n // n_folds
        K, L = tf.cast(K, tf.float64), tf.cast(L, tf.float64)  # cast both to float64 for the regularised inverses below
perm = tf.random.shuffle(range(n))
K, L = tf.gather(tf.gather(K, perm), perm, axis=1), tf.gather(tf.gather(L, perm), perm, axis=1)
losses = tf.zeros_like(lams, dtype=tf.float64)
for fold in range(n_folds):
inds_oof = np.arange(n)[(fold*fold_size):((fold+1)*fold_size)]
inds_if = np.setdiff1d(np.arange(n), inds_oof)
K_if = tf.gather(tf.gather(K, inds_if), inds_if, axis=1)
L_if = tf.gather(tf.gather(L, inds_if), inds_if, axis=1)
n_if = len(K_if)
L_inv_lams = tf.stack(
[tf.linalg.inv(L_if + n_if*lam*tf.eye(n_if, dtype=tf.float64)) for lam in lams]) # n_lam x n_if x n_if
KW = tf.einsum('ij,ljk->lik', K_if, L_inv_lams)
lW = tf.einsum('ij,ljk->lik', tf.gather(tf.gather(L, inds_oof), inds_if, axis=1), L_inv_lams)
lWKW = tf.einsum('lij,ljk->lik', lW, KW)
lWKWl = tf.einsum('lkj,jk->lk', lWKW, tf.gather(tf.gather(L, inds_if), inds_oof, axis=1)) # n_lam x n_oof
lWk = tf.einsum('lij,ji->li', lW, tf.gather(tf.gather(K, inds_if), inds_oof, axis=1)) # n_lam x n_oof
kxx = tf.ones_like(lWk) * tf.reduce_max(K)
losses += tf.reduce_sum(lWKWl + kxx - 2*lWk, axis=-1)
return tf.cast(lams[tf.argmin(losses)], tf.float32)
def _split_chunks(n: int, p: int) -> List[int]:
"""
    Private function to calculate chunk sizes for tf.split(). An array/tensor of length n is split into p
    chunks of roughly equal size.
Parameters
----------
n
Size of array/tensor to be split.
p
Number of chunks.
Returns
-------
List containing the size of each chunk.
"""
if p >= n:
chunks = [n]
else:
chunks = [n // p + 1] * (n % p) + [n // p] * (p - n % p)
return chunks
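# For example (illustrative only): _split_chunks(10, 3) == [4, 3, 3] and
# _split_chunks(2, 5) == [2], so the chunk sizes always sum to n as tf.split() requires.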
def _sigma_median_diag(x: tf.Tensor, y: tf.Tensor, dist: tf.Tensor) -> tf.Tensor:
"""
Private version of the bandwidth estimation function :py:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`,
with the +n (and -1) term excluded to account for the diagonal of the kernel matrix.
Parameters
----------
x
Tensor of instances with dimension [Nx, features].
y
Tensor of instances with dimension [Ny, features].
dist
Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.
Returns
-------
The computed bandwidth, `sigma`.
"""
n_median = tf.math.reduce_prod(dist.shape) // 2
sigma = tf.expand_dims((.5 * tf.sort(tf.reshape(dist, (-1,)))[n_median]) ** .5, axis=0)
return sigma
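# --- Usage sketch (illustrative, not part of the library) ---
# Context-aware drift check on synthetic stand-in data: x depends on a context c,
# so differences in x that are explained by c alone should not be flagged.
if __name__ == '__main__':
    c_ref = np.random.randn(250, 1).astype(np.float32)
    x_ref = c_ref + np.random.randn(250, 3).astype(np.float32)
    cd = ContextMMDDriftTF(x_ref, c_ref, p_val=.05, n_permutations=100)
    c = np.random.randn(250, 1).astype(np.float32)
    x = c + np.random.randn(250, 3).astype(np.float32)
    preds = cd.predict(x, c)
    print(preds['data']['is_drift'], preds['data']['p_val'])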
| 13,836 | 45.589226 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
HiddenOutput, UAE, preprocess_drift = import_optional(
'alibi_detect.cd.tensorflow.preprocess',
names=['HiddenOutput', 'UAE', 'preprocess_drift']
)
__all__ = [
"HiddenOutput",
"UAE",
"preprocess_drift"
]
| 301 | 22.230769 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/lsdd.py
|
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseLSDDDrift
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.tensorflow.distance import permed_lsdds
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class LSDDDriftTF(BaseLSDDDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_permutations: int = 100,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Least-squares density difference (LSDD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_permutations
Number of permutations used in the permutation test.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 1/20th of the reference data.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
n_permutations=n_permutations,
n_kernel_centers=n_kernel_centers,
lambda_rd_max=lambda_rd_max,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.TENSORFLOW.value})
if self.preprocess_at_init or self.preprocess_fn is None or self.x_ref_preprocessed:
x_ref = tf.convert_to_tensor(self.x_ref)
self._configure_normalization(x_ref)
x_ref = self._normalize(x_ref)
self._initialize_kernel(x_ref)
self._configure_kernel_centers(x_ref)
self.x_ref = x_ref.numpy() # type: ignore[union-attr]
# For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
# Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
def _initialize_kernel(self, x_ref: tf.Tensor):
if self.sigma is None:
self.kernel = GaussianRBF()
_ = self.kernel(x_ref, x_ref, infer_sigma=True)
else:
sigma = tf.convert_to_tensor(self.sigma)
self.kernel = GaussianRBF(sigma)
def _configure_normalization(self, x_ref: tf.Tensor, eps: float = 1e-12):
x_ref_means = tf.reduce_mean(x_ref, axis=0)
x_ref_stds = tf.math.reduce_std(x_ref, axis=0)
self._normalize = lambda x: (x - x_ref_means) / (x_ref_stds + eps)
self._unnormalize = lambda x: (x * (x_ref_stds + eps) + x_ref_means).numpy()
def _configure_kernel_centers(self, x_ref: tf.Tensor):
"Set aside reference samples to act as kernel centers"
perm = tf.random.shuffle(tf.range(self.x_ref.shape[0]))
c_inds, non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
self.kernel_centers = tf.gather(x_ref, c_inds)
if np.unique(self.kernel_centers.numpy(), axis=0).shape[0] < self.n_kernel_centers:
perturbation = tf.random.normal(self.kernel_centers.shape, mean=0, stddev=1e-6)
self.kernel_centers = self.kernel_centers + perturbation
x_ref_eff = tf.gather(x_ref, non_c_inds) # the effective reference set
self.k_xc = self.kernel(x_ref_eff, self.kernel_centers)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the least-squares density
difference as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the LSDD between the reference and test set, \
and the LSDD threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
if self.preprocess_fn is not None and not self.preprocess_at_init and not self.x_ref_preprocessed:
self._configure_normalization(x_ref)
x_ref = self._normalize(x_ref)
self._initialize_kernel(x_ref)
self._configure_kernel_centers(x_ref)
self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
x = self._normalize(x)
k_yc = self.kernel(x, self.kernel_centers)
k_all_c = tf.concat([self.k_xc, k_yc], axis=0)
n_x = x_ref.shape[0] - self.n_kernel_centers
n_all = k_all_c.shape[0]
perms = [tf.random.shuffle(tf.range(n_all)) for _ in range(self.n_permutations)]
x_perms = [perm[:n_x] for perm in perms]
y_perms = [perm[n_x:] for perm in perms]
lsdd_permuted, _, lsdd = permed_lsdds( # type: ignore
k_all_c, x_perms, y_perms, self.H, lam_rd_max=self.lambda_rd_max, return_unpermed=True
)
p_val = tf.reduce_mean(tf.cast(lsdd <= lsdd_permuted, float))
idx_threshold = int(self.p_val * len(lsdd_permuted))
distance_threshold = np.sort(lsdd_permuted)[::-1][idx_threshold]
return float(p_val), float(lsdd), float(distance_threshold)
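# --- Usage sketch (illustrative, not part of the library) ---
# Minimal offline LSDD permutation test on synthetic stand-in data.
if __name__ == '__main__':
    x_ref = np.random.randn(500, 10).astype(np.float32)
    cd = LSDDDriftTF(x_ref, p_val=.05, n_permutations=100)
    x = np.random.randn(500, 10).astype(np.float32) + 1.  # mean-shifted sample
    preds = cd.predict(x)
    print(preds['data']['is_drift'], preds['data']['p_val'])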
| 7,873 | 47.306748 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/preprocess.py
|
from typing import Callable, Dict, Optional, Type, Union
import numpy as np
import tensorflow as tf
from alibi_detect.utils.tensorflow.prediction import (
predict_batch, predict_batch_transformer)
from tensorflow.keras.layers import Dense, Flatten, Input, InputLayer
from tensorflow.keras.models import Model
class _Encoder(tf.keras.Model):
def __init__(
self,
input_layer: Union[tf.keras.layers.Layer, tf.keras.Model],
mlp: Optional[tf.keras.Model] = None,
enc_dim: Optional[int] = None,
step_dim: Optional[int] = None
) -> None:
super().__init__()
self.input_layer = input_layer
if isinstance(mlp, tf.keras.Model):
self.mlp = mlp
elif isinstance(enc_dim, int) and isinstance(step_dim, int):
self.mlp = tf.keras.Sequential(
[
Flatten(),
Dense(enc_dim + 2 * step_dim, activation=tf.nn.relu),
Dense(enc_dim + step_dim, activation=tf.nn.relu),
Dense(enc_dim, activation=None)
]
)
else:
raise ValueError('Need to provide either `enc_dim` and `step_dim` or a '
'tf.keras.Sequential or tf.keras.Model `mlp`')
def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
x = self.input_layer(x)
return self.mlp(x)
class UAE(tf.keras.Model):
def __init__(
self,
encoder_net: Optional[tf.keras.Model] = None,
input_layer: Optional[Union[tf.keras.layers.Layer, tf.keras.Model]] = None,
shape: Optional[tuple] = None,
enc_dim: Optional[int] = None
) -> None:
super().__init__()
is_enc = isinstance(encoder_net, tf.keras.Model)
is_enc_dim = isinstance(enc_dim, int)
if is_enc:
self.encoder = encoder_net
elif not is_enc and is_enc_dim: # set default encoder
input_layer = InputLayer(input_shape=shape) if input_layer is None else input_layer
input_dim = np.prod(shape)
step_dim = int((input_dim - enc_dim) / 3)
self.encoder = _Encoder(input_layer, enc_dim=enc_dim, step_dim=step_dim)
elif not is_enc and not is_enc_dim:
raise ValueError('Need to provide either `enc_dim` or a tf.keras.Sequential'
' or tf.keras.Model `encoder_net`.')
def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
return self.encoder(x)
class HiddenOutput(tf.keras.Model):
def __init__(
self,
model: tf.keras.Model,
layer: int = -1,
input_shape: tuple = None,
flatten: bool = False
) -> None:
super().__init__()
if input_shape and not model.inputs:
inputs = Input(shape=input_shape)
model.call(inputs)
else:
inputs = model.inputs
self.model = Model(inputs=inputs, outputs=model.layers[layer].output)
self.flatten = Flatten() if flatten else tf.identity
def call(self, x: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
return self.flatten(self.model(x))
def preprocess_drift(x: Union[np.ndarray, list], model: tf.keras.Model,
preprocess_batch_fn: Callable = None, tokenizer: Callable = None,
max_len: int = None, batch_size: int = int(1e10), dtype: Type[np.generic] = np.float32) \
-> Union[np.ndarray, tf.Tensor]:
"""
Prediction function used for preprocessing step of drift detector.
Parameters
----------
x
Batch of instances.
model
Model used for preprocessing.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the TensorFlow model.
tokenizer
Optional tokenizer for text drift.
max_len
Optional max token length for text drift.
batch_size
Batch size.
dtype
Model output type, e.g. np.float32 or tf.float32.
Returns
-------
Numpy array with predictions.
"""
if tokenizer is None:
return predict_batch(x, model, batch_size=batch_size, preprocess_fn=preprocess_batch_fn, dtype=dtype)
else:
return predict_batch_transformer(x, model, tokenizer, max_len, batch_size=batch_size, dtype=dtype)
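# --- Usage sketch (illustrative, not part of the library) ---
# Dimensionality reduction with a randomly initialised UAE as a drift-detector
# preprocessing step, mirroring how the accompanying tests build the encoder;
# the input dimension and encoding dimension below are stand-ins.
if __name__ == '__main__':
    from functools import partial
    x = np.random.randn(100, 8).astype(np.float32)
    encoder_net = tf.keras.Sequential([InputLayer(input_shape=(8,)), Dense(3)])
    uae = UAE(encoder_net=encoder_net)
    preprocess_fn = partial(preprocess_drift, model=uae, batch_size=64)
    print(preprocess_fn(x).shape)  # (100, 3)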
| 4,509 | 36.272727 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_contextmmd_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
n, n_hidden, n_classes = 250, 10, 3
tf.random.set_seed(0)
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
update_ref = [{'last': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_context_mmddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_ref, preprocess_at_init))
n_tests = len(tests_context_mmddrift)
@pytest.fixture
def context_mmd_params(request):
return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmd_params', list(range(n_tests)), indirect=True)
def test_context_mmd(context_mmd_params):
n_features, n_enc, preprocess, n_permutations, update_ref, preprocess_at_init = context_mmd_params
np.random.seed(0)
c_ref = np.random.randn(*(n, 1)).astype(np.float32)
x_ref = c_ref + np.random.randn(*(n, n_features)).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
pytest.skip("Skip tests where preprocess_at_init=False and x_ref is list.")
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
tf.random.set_seed(0)
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
cd = ContextMMDDriftTF(
x_ref=x_ref,
c_ref=c_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
c = c_ref.copy()
x = x_ref.copy()
preds = cd.predict(x, c, return_p_val=True, return_distance=False, return_coupling=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
assert preds['data']['distance'] is None
assert isinstance(preds['data']['coupling_xy'], np.ndarray)
if isinstance(update_ref, dict):
k = list(update_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_ref[k], len(x) + len(x_ref))
assert cd.c_ref.shape[0] == min(update_ref[k], len(x) + len(c_ref))
c_h1 = np.random.randn(*(n, 1)).astype(np.float32)
x_h1 = c_h1 + np.random.randn(*(n, n_features)).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, c_h1, return_p_val=True, return_distance=True, return_coupling=False)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
assert 'coupling_xy' not in preds['data']
| 4,514 | 37.262712 | 102 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_classifier_tf.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from typing import Union
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
n = 100
def mymodel(shape, softmax: bool = True):
x_in = Input(shape=shape)
x = Dense(20, activation=tf.nn.relu)(x_in)
x = Dense(2)(x)
if softmax:
x = tf.nn.softmax(x)
return tf.keras.models.Model(inputs=x_in, outputs=x)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
p_val = [.05]
n_features = [4]
preds_type = ['probs', 'logits']
binarize_preds = [True, False]
n_folds = [None, 2]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_clfdrift = list(product(p_val, n_features, preds_type, binarize_preds, n_folds,
train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
p_val, n_features, preds_type, binarize_preds, n_folds, \
train_size, preprocess_batch, update_x_ref = clfdrift_params
np.random.seed(0)
tf.random.set_seed(0)
model = mymodel((n_features,), softmax=(preds_type == 'probs'))
x_ref = np.random.randn(*(n, n_features))
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = ClassifierDriftTF(
x_ref=x_ref,
model=model,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
n_folds=n_folds,
preds_type=preds_type,
binarize_preds=binarize_preds,
preprocess_batch_fn=preprocess_batch,
batch_size=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == preds_type
assert cd.meta['params']['binarize_preds '] == binarize_preds
| 2,729 | 28.354839 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/conftest.py
|
import pytest
@pytest.fixture
def seed(pytestconfig):
"""
Returns the random seed set by pytest-randomly.
"""
return pytestconfig.getoption("randomly_seed")
| 175 | 16.6 | 51 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_lsdd_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5
tf.random.set_seed(0)
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
update_x_ref = [None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_lsdddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_lsdddrift)
@pytest.fixture
def lsdd_params(request):
return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdd_params', list(range(n_tests)), indirect=True)
def test_lsdd(lsdd_params):
n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = lsdd_params
np.random.seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
return
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
tf.random.set_seed(0)
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
else:
preprocess_fn = None
cd = LSDDDriftTF(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
perturbation = np.random.normal(size=(n, n_features)) / 100 # LSDD struggles with copies/repeats
x = x_ref.copy() + perturbation.astype(np.float32)
preds = cd.predict(x, return_p_val=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
if isinstance(update_x_ref, dict):
k = list(update_x_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, return_p_val=True)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 4,088 | 35.508929 | 101 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_lsdd_online_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
if len(x) > 1: # test List[Any] reference data inputs to the detector with Any=np.ndarray
return np.concatenate(x, axis=0)
else: # test Any inputs to the prediction function of the detector with Any=List[np.ndarray]
return np.array(x)[0]
n_features = [10]
n_enc = [None, 3]
ert = [25]
window_size = [5]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
n_bootstraps = [200]
tests_lsdddriftonline = list(product(n_features, n_enc, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdd_online_params(request):
return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdd_online_params', list(range(n_tests)), indirect=True)
def test_lsdd_online(lsdd_online_params, seed):
n_features, n_enc, ert, window_size, preprocess, n_bootstraps = lsdd_online_params
with fixed_seed(seed):
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
with fixed_seed(0):
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
else:
preprocess_fn = None
with fixed_seed(seed):
cd = LSDDDriftOnlineTF(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps
)
x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
detection_times_h0 = []
test_stats_h0 = []
for x_t in x_h0:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h0.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h0 = np.array(detection_times_h0).mean()
test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
assert ert/3 < average_delay_h0 < 3*ert
cd.reset_state()
detection_times_h1 = []
test_stats_h1 = []
for x_t in x_h1:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h1.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h1 = np.array(detection_times_h1).mean()
test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
assert np.abs(average_delay_h1) < ert/2
assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_lsdd_online_state_online(tmp_path, seed):
"""
Test save/load/reset state methods for LSDDDriftOnlineTF. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
n = 100
with fixed_seed(seed):
x_ref = np.random.normal(0, 1, (n, n_classes))
x = np.random.normal(0.1, 1, (n, n_classes))
dd = LSDDDriftOnlineTF(x_ref, window_size=10, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles tf.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
        np.testing.assert_array_equal(orig_val, getattr(dd, key))  # use np.testing here as it handles tf.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
assert new_pred['data']['test_stat'] == test_stats_1[5]
| 6,400 | 35.787356 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_spot_the_diff_tf.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
from typing import Union
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
n = 100
class MyKernel(tf.keras.Model): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.config = {'n_features': n_features}
self.dense = Dense(20)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
def get_config(self) -> dict:
return self.config
@classmethod
def from_config(cls, config):
return cls(**config)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
kernel = [None, MyKernel]
n_diffs = [1, 5]
tests_stddrift = list(product(p_val, n_features, train_size, preprocess_batch, kernel, n_diffs))
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
p_val, n_features, train_size, preprocess_batch, kernel, n_diffs = stddrift_params
np.random.seed(0)
tf.random.set_seed(0)
if kernel is not None:
kernel = kernel(n_features)
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
cd = SpotTheDiffDriftTF(
x_ref=x_ref,
kernel=kernel,
p_val=p_val,
n_diffs=n_diffs,
train_size=train_size,
preprocess_batch_fn=preprocess_batch,
batch_size=3,
epochs=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd._detector.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['diffs'].shape == (n_diffs, n_features)
assert preds_0['data']['diff_coeffs'].shape == (n_diffs,)
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['n_diffs'] == n_diffs
| 2,674 | 27.157895 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_mmd_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5
tf.random.set_seed(0)
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_mmddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = mmd_params
np.random.seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
return
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
tf.random.set_seed(0)
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
else:
preprocess_fn = None
cd = MMDDriftTF(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
x = x_ref.copy()
preds = cd.predict(x, return_p_val=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
if isinstance(update_x_ref, dict):
k = list(update_x_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, return_p_val=True)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 3,983 | 35.218182 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_mmd_online_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable, List
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
from alibi_detect.utils._random import fixed_seed
n, n_hidden, n_classes = 400, 10, 5
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
if len(x) > 1: # test List[Any] reference data inputs to the detector with Any=np.ndarray
return np.concatenate(x, axis=0)
else: # test Any inputs to the prediction function of the detector with Any=List[np.ndarray]
return np.array(x)[0]
n_features = [10]
n_enc = [None, 3]
ert = [25]
window_size = [5]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE}),
(preprocess_list, None)
]
n_bootstraps = [200]
tests_mmddriftonline = list(product(n_features, n_enc, ert, window_size, preprocess, n_bootstraps))
n_tests = len(tests_mmddriftonline)
@pytest.fixture
def mmd_online_params(request):
return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmd_online_params', list(range(n_tests)), indirect=True)
def test_mmd_online(mmd_online_params, seed):
n_features, n_enc, ert, window_size, preprocess, n_bootstraps = mmd_online_params
with fixed_seed(seed):
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
with fixed_seed(0):
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
else:
preprocess_fn = None
with fixed_seed(seed):
cd = MMDDriftOnlineTF(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
n_bootstraps=n_bootstraps
)
x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32) + 1
detection_times_h0 = []
test_stats_h0 = []
for x_t in x_h0:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h0.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h0 = np.array(detection_times_h0).mean()
test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
assert ert/3 < average_delay_h0 < 3*ert
cd.reset_state()
detection_times_h1 = []
test_stats_h1 = []
for x_t in x_h1:
if to_list:
x_t = [x_t]
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h1.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
average_delay_h1 = np.array(detection_times_h1).mean()
test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
assert np.abs(average_delay_h1) < ert/2
assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
def test_mmd_online_state_online(tmp_path, seed):
"""
Test save/load/reset state methods for MMDDriftOnlineTF. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
n = 100
with fixed_seed(seed):
x_ref = np.random.normal(0, 1, (n, n_classes))
x = np.random.normal(0.1, 1, (n, n_classes))
dd = MMDDriftOnlineTF(x_ref, window_size=10, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
assert new_pred['data']['test_stat'] == test_stats_1[5]
| 6,434 | 35.771429 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_preprocess_tf.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
X_uae = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(latent_dim)
]
)
tests_uae = [encoder_net, latent_dim]
n_tests_uae = len(tests_uae)
@pytest.fixture
def uae_params(request):
return tests_uae[request.param]
@pytest.mark.parametrize('uae_params', list(range(n_tests_uae)), indirect=True)
def test_uae(uae_params):
enc = uae_params
if isinstance(enc, tf.keras.Sequential):
encoder_net, enc_dim = enc, None
elif isinstance(enc, int):
encoder_net, enc_dim = None, enc
X_enc = UAE(encoder_net=encoder_net, shape=X_uae.shape[1:], enc_dim=enc_dim)(X_uae)
assert X_enc.shape == (n, latent_dim)
dim1, dim2, n_hidden = 2, 3, 7
n_features = dim1 * dim2
shape = (dim1, dim2)
X_h = np.random.rand(n * n_features).reshape((n,) + shape).astype('float32')
class Model1(tf.keras.Model):
def __init__(self):
super(Model1, self).__init__()
self.dense1 = Dense(n_hidden)
self.dense2 = Dense(n_classes, activation='softmax')
def call(self, x: np.ndarray) -> tf.Tensor:
x = self.dense1(x)
return self.dense2(x)
def model2():
x_in = Input(shape=(dim1, dim2))
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
tests_hidden_output = [
(1, -2, shape, True), (1, -2, shape, False),
(1, -1, shape, True), (1, -1, shape, False),
(2, -2, None, True), (2, -2, None, False),
(2, -1, None, True), (2, -1, None, False),
(2, -1, shape, True), (2, -1, shape, False)
]
n_tests_hidden_output = len(tests_hidden_output)
@pytest.fixture
def hidden_output_params(request):
return tests_hidden_output[request.param]
@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
model, layer, input_shape, flatten = hidden_output_params
model = Model1() if model == 1 else model2()
X_hidden = HiddenOutput(model=model, layer=layer, input_shape=input_shape, flatten=flatten)(X_h)
if layer == -2:
assert_shape = (n, dim1, n_hidden)
elif layer == -1:
assert_shape = (n, dim1, n_classes)
if flatten:
assert_shape = (assert_shape[0],) + (np.prod(assert_shape[1:]),)
assert X_hidden.shape == assert_shape
| 2,732 | 29.707865 | 100 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tensorflow/tests/test_learned_kernel_tf.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
from typing import Union
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
n = 100
class MyKernel(tf.keras.Model): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.config = {'n_features': n_features}
self.dense = Dense(20)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
def get_config(self) -> dict:
return self.config
@classmethod
def from_config(cls, config):
return cls(**config)
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_lkdrift = list(product(p_val, n_features, train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
p_val, n_features, train_size, preprocess_batch, update_x_ref = lkdrift_params
np.random.seed(0)
tf.random.set_seed(0)
kernel = MyKernel(n_features)
x_ref = np.random.randn(*(n, n_features))
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = LearnedKernelDriftTF(
x_ref=x_ref,
kernel=kernel,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
preprocess_batch_fn=preprocess_batch,
batch_size=3,
epochs=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_0['data']['distance'] < preds_1['data']['distance']
| 2,479 | 26.555556 | 92 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/missing_optional_dependency.py
|
"""Functionality for optional importing
This module provides a way to import optional dependencies. If the user imports functionality from
alibi-detect that is unusable due to missing optional dependencies, this code allows the import to succeed
but replaces the imported object with one that raises an error on use. This way we avoid errors at import
time that would prevent the user from using functionality independent of the missing dependency.
"""
from typing import Union, List, Optional, Any
from string import Template
from importlib import import_module
err_msg_template = Template((
"Attempted to use $object_name without the correct optional dependencies installed. To install "
+ "the correct optional dependencies, run `pip install alibi-detect[$missing_dependency]` "
+ "from the command line. For more information, check the Installation documentation "
+ "at https://docs.seldon.io/projects/alibi-detect/en/stable/overview/getting_started.html."
))
"""Mapping used to ensure correct pip install message is generated if a missing optional dependency is detected. This
dict is used to control two behaviours:
1. When we import objects from missing dependencies we check that any `ModuleNotFoundError` or `ImportError`
corresponds to a missing optional dependency by checking the name of the missing dependency is in `ERROR_TYPES`. We
then map this name to the corresponding optional dependency bucket that will resolve the issue.
2. Some optional dependencies have multiple names such as `torch` and `pytorch`, instead of enforcing a single
naming convention across the whole code base we instead use `ERROR_TYPES` to capture both cases. This is done right
before the pip install message is issued as this is the most robust place to capture these differences.
"""
ERROR_TYPES = {
"prophet": 'prophet',
"tensorflow_probability": 'tensorflow',
"tensorflow": 'tensorflow',
"torch": 'torch',
"pytorch": 'torch',
"keops": 'keops',
"pykeops": 'keops',
}
class MissingDependency:
"""Missing Dependency Class.
Used to replace any object that requires unmet optional dependencies. Attribute access or calling the __call__
method on this object will raise an error.
"""
def __init__(self,
object_name: str,
err: Union[ModuleNotFoundError, ImportError],
missing_dependency: str = 'all',):
"""Metaclass for MissingDependency classes.
Parameters
----------
object_name
Name of object we are replacing
err
Error to be raised when the class is initialized or used
missing_dependency
Name of missing dependency required for object
"""
self.missing_dependency = missing_dependency
self.object_name = object_name
self.err = err
@property
def err_msg(self) -> str:
"""Generate error message informing user to install missing dependencies."""
return err_msg_template.substitute(
object_name=self.object_name,
missing_dependency=self.missing_dependency)
def __getattr__(self, key):
"""Raise an error when attributes are accessed."""
raise ImportError(self.err_msg) from self.err
def __call__(self, *args, **kwargs):
"""If called, raise an error."""
raise ImportError(self.err_msg) from self.err
def import_optional(module_name: str, names: Optional[List[str]] = None) -> Any:
"""Import a module that depends on optional dependencies
    Note: This function is used to import modules that depend on optional dependencies. Because it mirrors the Python
    import functionality, its return type has to be `Any`. Using objects imported with this function can lead to
    types being misspecified as `Any` when the developer intended to be more restrictive.
Parameters
----------
module_name
The module to import
names
The names to import from the module. If None, all names are imported.
Returns
-------
The module or named objects within the modules if names is not None. If the import fails due to a \
`ModuleNotFoundError` or `ImportError` then the requested module or named objects are replaced with instances of \
the MissingDependency class above.
"""
try:
module = import_module(module_name)
if names is not None:
objs = tuple(getattr(module, name) for name in names)
return objs if len(objs) > 1 else objs[0]
return module
except (ImportError, ModuleNotFoundError) as err:
if err.name is None:
raise err
dep_name, *_ = err.name.split('.')
if str(dep_name) not in ERROR_TYPES:
raise err
missing_dependency = ERROR_TYPES[dep_name]
if names is not None:
missing_dependencies = \
tuple(MissingDependency(
missing_dependency=missing_dependency,
object_name=name,
err=err) for name in names)
return missing_dependencies if len(missing_dependencies) > 1 else missing_dependencies[0]
return MissingDependency(
missing_dependency=missing_dependency,
object_name=module_name,
err=err)
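# Illustrative usage sketch (not part of the original module): how `import_optional`
# might be used at a package boundary. `MMDDriftTF` is a real alibi-detect object,
# but this helper function itself is hypothetical.
def _example_import_optional():
    # On missing optional deps this returns a MissingDependency stub instead of
    # raising at import time; using the stub raises an informative ImportError.
    MMDDriftTF = import_optional('alibi_detect.cd.tensorflow.mmd', names=['MMDDriftTF'])
    return MMDDriftTF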
| 5,347 | 41.110236 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/url.py
|
from typing import Union, List
from urllib.parse import urljoin, quote_plus
def _join_url(base: str, parts: Union[str, List[str]]) -> str:
"""
Constructs a full (“absolute”) URL by combining a “base URL” (base) with additional relative URL parts.
    The behaviour is similar to os.path.join() on Linux, but also behaves consistently on Windows.
Parameters
----------
base
The base URL, e.g. `'https://mysite.com/'`.
parts
Part to append, or list of parts to append e.g. `['/dir1/', 'dir2', 'dir3']`.
Returns
-------
The joined url e.g. `https://mysite.com/dir1/dir2/dir3`.
"""
parts = [parts] if isinstance(parts, str) else parts
if len(parts) == 0:
raise TypeError("The `parts` argument must contain at least one item.")
url = urljoin(base + "/", "/".join(quote_plus(part.strip(r"\/"), safe="/") for part in parts))
return url
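# Illustrative usage sketch (not part of the original module); the URL is a dummy.
def _example_join_url():
    url = _join_url('https://mysite.com', ['dir1/', '/dir2', 'file.bin'])
    assert url == 'https://mysite.com/dir1/dir2/file.bin'
    return url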
| 913 | 34.153846 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/_types.py
|
"""
Defining types compatible with different Python versions and defining custom types.
"""
import sys
from sklearn.base import BaseEstimator # import here (instead of later) since sklearn currently a core dep
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch
from typing import Union, Type
# Literal for typing
if sys.version_info >= (3, 8):
from typing import Literal # noqa
else:
from typing_extensions import Literal # noqa
from typing_extensions import TypeAlias
# Optional dep dependent tuples of types, for isinstance checks and pydantic
supported_models_tf: tuple = ()
supported_models_torch: tuple = ()
supported_optimizers_tf: tuple = ()
supported_optimizers_torch: tuple = ()
if has_tensorflow:
import tensorflow as tf
supported_models_tf = (tf.keras.Model, )
if hasattr(tf.keras.optimizers, 'legacy'):
supported_optimizers_tf = (tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer, type)
else:
supported_optimizers_tf = (tf.keras.optimizers.Optimizer, type)
if has_pytorch:
import torch
supported_models_torch = (torch.nn.Module, )
supported_optimizers_torch = (type, ) # Note type not object!
supported_models_sklearn = (BaseEstimator, )
supported_models_all = supported_models_tf + supported_models_torch + supported_models_sklearn
supported_optimizers_all = supported_optimizers_tf + supported_optimizers_torch
# type aliases, for use with mypy (must be FwdRef's if involving opt. deps.)
OptimizerTF: TypeAlias = Union['tf.keras.optimizers.Optimizer', 'tf.keras.optimizers.legacy.Optimizer',
Type['tf.keras.optimizers.Optimizer'], Type['tf.keras.optimizers.legacy.Optimizer']]
| 1,719 | 42 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/warnings.py
|
"""
This module defines custom warnings and exceptions used across the Alibi Detect library.
"""
import functools
import warnings
from typing import Dict, Any, Callable
def deprecated_alias(**aliases: str) -> Callable:
"""
Function decorator to warn about deprecated kwargs (and replace them).
"""
def deco(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
_rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
def _rename_kwargs(func_name: str, kwargs: Dict[str, Any], aliases: Dict[str, str]):
"""
Private function to rename deprecated kwarg to new name, and raise DeprecationWarning.
"""
for alias, new in aliases.items():
if alias in kwargs:
if new in kwargs:
raise ValueError(f"{func_name} received both the deprecated kwarg `{alias}` "
f"and it's replacement `{new}`.")
warnings.warn(f'`{alias}` is deprecated; use `{new}`.', UserWarning, stacklevel=3)
kwargs[new] = kwargs.pop(alias)
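# Illustrative usage sketch (not part of the original module): the function and
# kwarg names below are hypothetical.
@deprecated_alias(n_iter='n_iterations')
def _example_fit(x, n_iterations=10):
    return [x] * n_iterations
# Calling `_example_fit(0, n_iter=2)` warns that `n_iter` is deprecated and
# forwards the value to `n_iterations`, returning [0, 0].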
| 1,119 | 32.939394 | 94 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/prediction.py
|
import numpy as np
from typing import Callable, Union
def tokenize_transformer(x: Union[list, np.ndarray], tokenizer: Callable, max_len: int, backend: str) -> dict:
"""
Batch tokenizer for transformer models.
Parameters
----------
x
Batch of instances.
tokenizer
Tokenizer for model.
max_len
Max token length.
backend
PyTorch ('pt') or TensorFlow ('tf') backend.
Returns
-------
Tokenized instances.
"""
return tokenizer(list(x), padding=True, truncation=True, max_length=max_len, return_tensors=backend)
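# Illustrative usage sketch (not part of the original module), assuming the
# HuggingFace `transformers` package is installed; the model name is an example.
def _example_tokenize_transformer():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    tokens = tokenize_transformer(['first text', 'second text'], tokenizer,
                                  max_len=32, backend='tf')
    return tokens['input_ids']  # tf.Tensor of shape [2, padded_seq_len]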
| 593 | 22.76 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/perturbation.py
|
import random
from io import BytesIO
from typing import List, Tuple, Union
import cv2
import numpy as np
import skimage as sk
from alibi_detect.utils.data import Bunch
from alibi_detect.utils.discretizer import Discretizer
from alibi_detect.utils.distance import abdm, multidim_scaling
from alibi_detect.utils.mapping import ohe2ord
from PIL import Image
from scipy.ndimage import zoom
from scipy.ndimage.interpolation import map_coordinates
from skimage.filters import gaussian
def apply_mask(X: np.ndarray,
mask_size: tuple = (4, 4),
n_masks: int = 1,
coord: tuple = None,
channels: list = [0, 1, 2],
mask_type: str = 'uniform',
noise_distr: tuple = (0, 1),
noise_rng: tuple = (0, 1),
clip_rng: tuple = (0, 1)
) -> Tuple[np.ndarray, np.ndarray]:
"""
Mask images. Can zero out image patches or add normal or uniformly distributed noise.
Parameters
----------
X
Batch of instances to be masked.
mask_size
Tuple with the size of the mask.
n_masks
Number of masks applied for each instance in the batch X.
coord
Upper left (x,y)-coordinates for the mask.
channels
Channels of the image to apply the mask to.
mask_type
        Type of mask. One of 'normal', 'uniform' (both additive noise) or 'zero' (zero values for mask).
    noise_distr
        Mean and standard deviation for noise of 'normal' mask type.
noise_rng
Min and max value for noise of 'uniform' type.
clip_rng
Min and max values for the masked instances.
Returns
-------
Tuple with masked instances and the masks.
"""
X_shape = X.shape
# initialize mask
    if mask_type in ('normal', 'uniform'):
        mask = np.zeros((n_masks,) + X_shape[1:])
    elif mask_type == 'zero':
        mask = np.ones((n_masks,) + X_shape[1:])
    else:  # previously unreachable since `if mask_type != 'zero'` matched any invalid type
        raise ValueError('Only `normal`, `uniform` and `zero` masking available.')
# create noise for mask
if mask_type == 'normal':
noise = np.random.normal(loc=noise_distr[0], scale=noise_distr[1], size=(n_masks,) + mask_size)
elif mask_type == 'uniform':
noise = np.random.uniform(low=noise_rng[0], high=noise_rng[1], size=(n_masks,) + mask_size)
# find upper left coordinate for mask
if coord is None:
x_start = np.random.randint(0, X_shape[1] - mask_size[0], n_masks)
y_start = np.random.randint(0, X_shape[2] - mask_size[1], n_masks)
else:
x_start, y_start = coord
# update masks
for _ in range(x_start.shape[0]):
if mask_type == 'zero':
update_val: Union[float, np.ndarray] = 0.0
else:
update_val = noise[_]
for c in channels:
mask[
_,
x_start[_]:x_start[_] + mask_size[0],
y_start[_]:y_start[_] + mask_size[1],
c
] = update_val
# apply masks to instances
X_masks = []
for _ in range(X_shape[0]):
if mask_type == 'zero':
X_mask_ = X[_].reshape((1,) + X_shape[1:]) * mask
else:
X_mask_ = np.clip(X[_].reshape((1,) + X_shape[1:]) + mask, clip_rng[0], clip_rng[1])
X_masks.append(X_mask_)
X_mask = np.concatenate(X_masks, axis=0)
return X_mask, mask
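# Illustrative usage sketch (not part of the original module): the image shape
# and parameter values are assumptions.
def _example_apply_mask():
    X = np.random.rand(2, 28, 28, 3).astype(np.float32)  # toy batch of 2 RGB images
    X_mask, mask = apply_mask(X, mask_size=(4, 4), n_masks=3, mask_type='uniform')
    # each instance is masked n_masks times, so the batch dimension grows accordingly
    assert X_mask.shape == (2 * 3, 28, 28, 3) and mask.shape == (3, 28, 28, 3)
    return X_mask, mask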
def inject_outlier_ts(X: np.ndarray,
perc_outlier: int,
perc_window: int = 10,
n_std: float = 2.,
min_std: float = 1.
) -> Bunch:
"""
Inject outliers in both univariate and multivariate time series data.
Parameters
----------
X
Time series data to perturb (inject outliers).
perc_outlier
Percentage of observations which are perturbed to outliers. For multivariate data,
the percentage is evenly split across the individual time series.
perc_window
Percentage of the observations used to compute the standard deviation used in the perturbation.
n_std
Number of standard deviations in the window used to perturb the original data.
min_std
Minimum number of standard deviations away from the current observation. This is included because
of the stochastic nature of the perturbation which could lead to minimal perturbations without a floor.
Returns
-------
Bunch object with the perturbed time series and the outlier labels.
"""
n_dim = len(X.shape)
if n_dim == 1:
X = X.reshape(-1, 1)
n_samples, n_ts = X.shape
X_outlier = X.copy()
is_outlier = np.zeros(n_samples)
# one sided window used to compute mean and stdev from
window = int(perc_window * n_samples * .5 / 100)
# distribute outliers evenly over different time series
n_outlier = int(n_samples * perc_outlier * .01 / n_ts)
if n_outlier == 0:
return Bunch(data=X_outlier, target=is_outlier, target_names=['normal', 'outlier'])
for s in range(n_ts):
outlier_idx = np.sort(random.sample(range(n_samples), n_outlier))
window_idx = [
np.maximum(outlier_idx - window, 0),
np.minimum(outlier_idx + window, n_samples)
]
stdev = np.array([X_outlier[window_idx[0][i]:window_idx[1][i], s].std() for i in range(len(outlier_idx))])
rnd = np.random.normal(size=n_outlier)
X_outlier[outlier_idx, s] += np.sign(rnd) * np.maximum(np.abs(rnd * n_std), min_std) * stdev
is_outlier[outlier_idx] = 1
if n_dim == 1:
X_outlier = X_outlier.reshape(n_samples, )
return Bunch(data=X_outlier, target=is_outlier, target_names=['normal', 'outlier'])
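# Illustrative usage sketch (not part of the original module): the series and
# percentages are assumptions.
def _example_inject_outlier_ts():
    X = np.sin(np.linspace(0, 10, 1000)).astype(np.float32)
    data = inject_outlier_ts(X, perc_outlier=5, perc_window=10, n_std=2., min_std=1.)
    X_outlier, y_outlier = data.data, data.target  # target flags the injected outliers
    assert X_outlier.shape == X.shape and int(y_outlier.sum()) == 50
    return X_outlier, y_outlier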
def inject_outlier_tabular(X: np.ndarray,
cols: List[int],
perc_outlier: int,
y: np.ndarray = None,
n_std: float = 2.,
min_std: float = 1.
) -> Bunch:
"""
Inject outliers in numerical tabular data.
Parameters
----------
X
Tabular data to perturb (inject outliers).
cols
Columns of X that are numerical and can be perturbed.
perc_outlier
Percentage of observations which are perturbed to outliers. For multiple numerical features,
the percentage is evenly split across the features.
y
Outlier labels.
n_std
Number of feature-wise standard deviations used to perturb the original data.
min_std
Minimum number of standard deviations away from the current observation. This is included because
of the stochastic nature of the perturbation which could lead to minimal perturbations without a floor.
Returns
-------
Bunch object with the perturbed tabular data and the outlier labels.
"""
n_dim = len(X.shape)
if n_dim == 1:
X = X.reshape(-1, 1)
n_samples, n_features = X.shape
X_outlier = X.astype(np.float32).copy()
if y is None:
is_outlier = np.zeros(n_samples)
else:
is_outlier = y
n_cols = len(cols)
# distribute outliers evenly over different columns
n_outlier = int(n_samples * perc_outlier * .01 / n_cols)
if n_outlier == 0:
return Bunch(data=X_outlier, target=is_outlier, target_names=['normal', 'outlier'])
# add perturbations
stdev = X_outlier.std(axis=0)
for col in cols:
outlier_idx = np.sort(random.sample(range(n_samples), n_outlier))
rnd = np.random.normal(size=n_outlier)
X_outlier[outlier_idx, col] += np.sign(rnd) * np.maximum(np.abs(rnd * n_std), min_std) * stdev[col]
is_outlier[outlier_idx] = 1
if n_dim == 1:
X_outlier = X_outlier.reshape(n_samples, )
return Bunch(data=X_outlier, target=is_outlier, target_names=['normal', 'outlier'])
def inject_outlier_categorical(X: np.ndarray,
cols: List[int],
perc_outlier: int,
y: np.ndarray = None,
cat_perturb: dict = None,
X_fit: np.ndarray = None,
disc_perc: list = [25, 50, 75],
smooth: float = 1.
) -> Bunch:
"""
Inject outliers in categorical variables of tabular data.
Parameters
----------
X
Tabular data with categorical variables to perturb (inject outliers).
cols
Columns of X that are categorical and can be perturbed.
perc_outlier
        Percentage of observations which are perturbed to outliers. For multiple categorical features,
the percentage is evenly split across the features.
y
Outlier labels.
cat_perturb
Dictionary mapping each category in the categorical variables to their furthest neighbour.
X_fit
Optional data used to infer pairwise distances from.
disc_perc
        List of percentiles used to bin the numerical features, needed for the 'abdm' pairwise distance measure.
smooth
Smoothing exponent between 0 and 1 for the distances.
Lower values will smooth the difference in distance metric between different features.
Returns
-------
Bunch object with the perturbed tabular data, outlier labels and \
a dictionary used to map categories to their furthest neighbour.
"""
if cat_perturb is None:
# transform the categorical variables into numerical ones via
# pairwise distances computed with abdm and multidim scaling
X_fit = X.copy() if X_fit is None else X_fit
# find number of categories for each categorical variable
cat_vars = {k: None for k in cols}
for k in cols:
cat_vars[k] = len(np.unique(X_fit[:, k])) # type: ignore
# TODO: extend method for OHE
ohe = False
if ohe:
X_ord, cat_vars_ord = ohe2ord(X, cat_vars)
else:
X_ord, cat_vars_ord = X, cat_vars
# bin numerical features to compute the pairwise distance matrices
n_ord = X_ord.shape[1]
if len(cols) != n_ord:
fnames = [str(_) for _ in range(n_ord)]
disc = Discretizer(X_ord, cols, fnames, percentiles=disc_perc)
X_bin = disc.discretize(X_ord)
cat_vars_bin = {k: len(disc.names[k]) for k in range(n_ord) if k not in cols}
else:
X_bin = X_ord
cat_vars_bin = {}
# pairwise distances for categorical variables
d_pair = abdm(X_bin, cat_vars_ord, cat_vars_bin)
# multidim scaling
feature_range = (np.ones((1, n_ord)) * -1e10, np.ones((1, n_ord)) * 1e10)
d_abs = multidim_scaling(d_pair,
n_components=2,
use_metric=True,
standardize_cat_vars=True,
smooth=smooth,
feature_range=feature_range,
update_feature_range=False)[0]
# find furthest category away for each category in the categorical variables
cat_perturb = {k: np.zeros(len(v)) for k, v in d_abs.items()}
for k, v in d_abs.items():
for i in range(len(v)):
cat_perturb[k][i] = np.argmax(np.abs(v[i] - v))
else:
d_abs = None
n_dim = len(X.shape)
if n_dim == 1:
X = X.reshape(-1, 1)
n_samples, n_features = X.shape
X_outlier = X.astype(np.float32).copy()
if y is None:
is_outlier = np.zeros(n_samples)
else:
is_outlier = y
n_cols = len(cols)
# distribute outliers evenly over different columns
n_outlier = int(n_samples * perc_outlier * .01 / n_cols)
for col in cols:
outlier_idx = np.sort(random.sample(range(n_samples), n_outlier))
col_cat = X_outlier[outlier_idx, col].astype(int)
col_map = np.tile(cat_perturb[col], (n_outlier, 1))
X_outlier[outlier_idx, col] = np.diag(col_map.T[col_cat])
is_outlier[outlier_idx] = 1
if n_dim == 1:
X_outlier = X_outlier.reshape(n_samples, )
return Bunch(data=X_outlier,
target=is_outlier,
cat_perturb=cat_perturb,
d_abs=d_abs,
target_names=['normal', 'outlier'])
# Note: the perturbation functions below are adopted from
# https://github.com/hendrycks/robustness/blob/master/ImageNet-C/imagenet_c/imagenet_c/corruptions.py
# and used in Dan Hendrycks and Thomas Dietterich, "Benchmarking Neural Network Robustness to Common
# Corruptions and Perturbations" (ICLR 2019).
# TODO: add proper batch support
def scale_minmax(x: np.ndarray, xrange: tuple = None) -> Tuple[np.ndarray, bool]:
"""
Minmax scaling to [0,1].
Parameters
----------
x
Numpy array to be scaled.
xrange
Tuple with min and max data range.
Returns
-------
Scaled array and boolean whether the array is actually scaled.
"""
scale_back = False
if isinstance(xrange, tuple):
scale_back = True
x = (x - xrange[0]) / (xrange[1] - xrange[0])
return x, scale_back
# Noise
def gaussian_noise(x: np.ndarray, stdev: float, xrange: tuple = None) -> np.ndarray:
"""
Inject Gaussian noise.
Parameters
----------
x
Instance to be perturbed.
stdev
Standard deviation of noise.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x_gn = x + np.random.normal(size=x.shape, scale=stdev)
if scale_back:
x_gn = x_gn * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_gn, xrange[0], xrange[1])
else:
return x_gn
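# Illustrative usage sketch (not part of the original module): the noise functions
# below share this calling pattern, so one example covers the family. Values are
# assumptions.
def _example_gaussian_noise():
    x = np.random.rand(32, 32, 3)  # toy image with values in [0, 1]
    x_gn = gaussian_noise(x, stdev=.1, xrange=(0, 1))
    assert x_gn.shape == x.shape and 0 <= x_gn.min() and x_gn.max() <= 1
    return x_gn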
def shot_noise(x: np.ndarray, lam: float, xrange: tuple = None) -> np.ndarray:
"""
Inject Poisson noise.
Parameters
----------
x
Instance to be perturbed.
lam
        Scalar for the lambda parameter of the Poisson distribution (the expected number of events).
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x_sn = np.random.poisson(x * lam) / float(lam)
if scale_back:
x_sn = x_sn * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_sn, xrange[0], xrange[1])
else:
return x_sn
def speckle_noise(x: np.ndarray, stdev: float, xrange: tuple = None) -> np.ndarray:
"""
Inject speckle noise.
Parameters
----------
x
Instance to be perturbed.
stdev
Standard deviation of noise.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x_sp = x * (1 + np.random.normal(size=x.shape, scale=stdev))
if scale_back:
x_sp = x_sp * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_sp, xrange[0], xrange[1])
else:
return x_sp
def impulse_noise(x: np.ndarray, amount: float, xrange: tuple = None) -> np.ndarray:
"""
Inject salt & pepper noise.
Parameters
----------
x
Instance to be perturbed.
amount
Proportion of pixels to replace with noise.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
if isinstance(xrange, tuple):
xmin, xmax = xrange[0], xrange[1]
else:
xmin, xmax = x.min(), x.max()
x_sc = (x - xmin) / (xmax - xmin) # scale to [0,1]
x_in = sk.util.random_noise(x_sc, mode='s&p', amount=amount) # inject noise
x_in = x_in * (xmax - xmin) + xmin # scale back
if isinstance(xrange, tuple):
return np.clip(x_in, xrange[0], xrange[1])
else:
return x_in
# Blur
def gaussian_blur(x: np.ndarray, sigma: float, multichannel: bool = True, xrange: tuple = None) -> np.ndarray:
"""
Apply Gaussian blur.
Parameters
----------
x
Instance to be perturbed.
sigma
Standard deviation determining the strength of the blur.
multichannel
Whether the image contains multiple channels (RGB) or not.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x_gb = gaussian(x, sigma=sigma, multichannel=multichannel)
if scale_back:
x_gb = x_gb * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_gb, xrange[0], xrange[1])
else:
return x_gb
def clipped_zoom(x: np.ndarray, zoom_factor: float) -> np.ndarray:
"""
Helper function for zoom blur.
Parameters
----------
x
Instance to be perturbed.
zoom_factor
Zoom strength.
Returns
-------
Cropped and zoomed instance.
"""
h = x.shape[0]
    ch = int(np.ceil(h / float(zoom_factor)))  # ceil crop height (= crop width)
top = (h - ch) // 2
x = zoom(x[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
trim_top = (x.shape[0] - h) // 2 # trim off any extra pixels
return x[trim_top:trim_top + h, trim_top:trim_top + h]
def zoom_blur(x: np.ndarray, max_zoom: float, step_zoom: float, xrange: tuple = None) -> np.ndarray:
"""
Apply zoom blur.
Parameters
----------
x
Instance to be perturbed.
max_zoom
Max zoom strength.
step_zoom
Step size to go from 1 to `max_zoom` strength.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
zoom_factors = np.arange(1, max_zoom, step_zoom)
out = np.zeros_like(x)
for zoom_factor in zoom_factors:
out += clipped_zoom(x, zoom_factor)
x_z = (x + out) / (len(zoom_factors) + 1)
if scale_back:
x_z = x_z * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_z, xrange[0], xrange[1])
else:
return x_z
def glass_blur(x: np.ndarray, sigma: float, max_delta: int, iterations: int, xrange: tuple = None) -> np.ndarray:
"""
Apply glass blur.
Parameters
----------
x
Instance to be perturbed.
sigma
Standard deviation determining the strength of the Gaussian perturbation.
max_delta
Maximum pixel range for the blurring.
iterations
Number of blurring iterations.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
nrows, ncols = x.shape[:2]
if not isinstance(xrange, tuple):
xrange = (x.min(), x.max())
if xrange[0] != 0 or xrange[1] != 255:
x = (x - xrange[0]) / (xrange[1] - xrange[0]) * 255
x = gaussian(x, sigma=sigma, multichannel=True).astype(np.uint8)
for i in range(iterations):
for h in range(nrows - max_delta, max_delta, -1):
for w in range(ncols - max_delta, max_delta, -1):
dx, dy = np.random.randint(-max_delta, max_delta, size=(2,))
h_prime, w_prime = h + dy, w + dx
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_gb = gaussian(x / 255, sigma=sigma, multichannel=True)
x_gb = x_gb * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_gb, xrange[0], xrange[1])
else:
return x_gb
def disk(radius: float, alias_blur: float = 0.1, dtype=np.float32) -> np.ndarray:
"""
Helper function for defocus blur.
Parameters
----------
radius
Radius for the Gaussian kernel.
alias_blur
Standard deviation for the Gaussian kernel in both X and Y directions.
dtype
Data type.
Returns
-------
Kernel used for Gaussian blurring.
"""
if radius <= 8.:
L = np.arange(-8., 8. + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
def defocus_blur(x: np.ndarray, radius: int, alias_blur: float, xrange: tuple = None) -> np.ndarray:
"""
Apply defocus blur.
Parameters
----------
x
Instance to be perturbed.
radius
Radius for the Gaussian kernel.
alias_blur
Standard deviation for the Gaussian kernel in both X and Y directions.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
kernel = disk(radius=radius, alias_blur=alias_blur)
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
x_db = np.array(channels).transpose((1, 2, 0))
if scale_back:
x_db = x_db * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_db, xrange[0], xrange[1])
else:
return x_db
def plasma_fractal(mapsize: int = 256, wibbledecay: float = 3.) -> np.ndarray:
"""
Helper function to apply fog to instance.
Generates a heightmap using diamond-square algorithm.
Returns a square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100.
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
def fog(x: np.ndarray, fractal_mult: float, wibbledecay: float, xrange: tuple = None) -> np.ndarray:
"""
Apply fog to instance.
Parameters
----------
x
Instance to be perturbed.
fractal_mult
Strength applied to `plasma_fractal` output.
wibbledecay
Decay factor for size of noise that is applied.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
max_val = x.max()
nrows, ncols = x.shape[:2]
x_fo = x + fractal_mult * plasma_fractal(wibbledecay=wibbledecay)[:nrows, :ncols][..., np.newaxis]
x_fo = x_fo * max_val / (max_val + fractal_mult)
if scale_back:
x_fo = x_fo * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_fo, xrange[0], xrange[1])
else:
return x_fo
# Digital
def contrast(x: np.ndarray, strength: float, xrange: tuple = None) -> np.ndarray:
"""
Change contrast of image.
Parameters
----------
x
Instance to be perturbed.
strength
        Strength of contrast change. Values below 1 reduce contrast, so lower means a stronger corruption.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
means = np.mean(x, axis=(0, 1), keepdims=True)
x_co = (x - means) * strength + means
if scale_back:
x_co = x_co * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_co, xrange[0], xrange[1])
else:
return x_co
def brightness(x: np.ndarray, strength: float, xrange: tuple = None) -> np.ndarray:
"""
Change brightness of image.
Parameters
----------
x
Instance to be perturbed.
strength
Strength of brightness change.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + strength, xrange[0], xrange[1])
x_br = sk.color.hsv2rgb(x)
if scale_back:
x_br = x_br * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_br, xrange[0], xrange[1])
else:
return x_br
def saturate(x: np.ndarray, strength: tuple, xrange: tuple = None) -> np.ndarray:
"""
Change colour saturation of image.
Parameters
----------
x
Instance to be perturbed.
strength
Strength of saturation change. Tuple consists of (multiplier, shift) of the perturbation.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
x = sk.color.rgb2hsv(x)
x[:, :, 1] = x[:, :, 1] * strength[0] + strength[1]
if isinstance(xrange, tuple):
x[:, :, 1] = np.clip(x[:, :, 1], xrange[0], xrange[1])
x_sa = sk.color.hsv2rgb(x)
if scale_back:
x_sa = x_sa * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_sa, xrange[0], xrange[1])
else:
return x_sa
def pixelate(x: np.ndarray, strength: float, xrange: tuple = None) -> np.ndarray:
"""
Change coarseness of pixels for an image.
Parameters
----------
x
Instance to be perturbed.
strength
Strength of pixelation (<1). Lower is actually more pixelated.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
rows, cols = x.shape[:2]
if not isinstance(xrange, tuple):
xrange = (x.min(), x.max())
if xrange[0] != 0 or xrange[1] != 255:
x = (x - xrange[0]) / (xrange[1] - xrange[0]) * 255
im = Image.fromarray(x.astype('uint8'), mode='RGB')
im = im.resize((int(rows * strength), int(cols * strength)), Image.BOX)
im = im.resize((rows, cols), Image.BOX)
x_pi = np.array(im, dtype=np.float32) / 255
x_pi = x_pi * (xrange[1] - xrange[0]) + xrange[0]
return x_pi
def jpeg_compression(x: np.ndarray, strength: float, xrange: tuple = None) -> np.ndarray:
"""
Simulate changes due to JPEG compression for an image.
Parameters
----------
x
Instance to be perturbed.
strength
Strength of compression (>1). Lower is actually more compressed.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
if not isinstance(xrange, tuple):
xrange = (x.min(), x.max())
if xrange[0] != 0 or xrange[1] != 255:
x = (x - xrange[0]) / (xrange[1] - xrange[0]) * 255
x = Image.fromarray(x.astype('uint8'), mode='RGB')
output = BytesIO()
x.save(output, 'JPEG', quality=strength) # type: ignore[attr-defined] # TODO: allow redefinition
x = Image.open(output)
x_jpeg = np.array(x, dtype=np.float32) / 255
x_jpeg = x_jpeg * (xrange[1] - xrange[0]) + xrange[0]
return x_jpeg
def elastic_transform(x: np.ndarray, mult_dxdy: float, sigma: float,
rnd_rng: float, xrange: tuple = None) -> np.ndarray:
"""
Apply elastic transformation to instance.
Parameters
----------
x
Instance to be perturbed.
mult_dxdy
Multiplier for the Gaussian noise in x and y directions.
sigma
Standard deviation determining the strength of the Gaussian perturbation.
rnd_rng
Range for random uniform noise.
xrange
Tuple with min and max data range.
Returns
-------
Perturbed instance.
"""
x, scale_back = scale_minmax(x, xrange)
shape = x.shape
shape_size = shape[:2]
mult_dxdy *= shape[0]
sigma *= shape[0]
rnd_rng *= shape[0]
# random affine
center_square = np.asarray(shape_size, dtype=np.float32) // 2
square_size = min(shape_size) // 3
pts1 = np.asarray([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size], dtype=np.float32)
pts2 = pts1 + np.random.uniform(-rnd_rng, rnd_rng, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(x, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = (gaussian(np.random.uniform(-1, 1, size=shape_size),
sigma, mode='reflect', truncate=3) * mult_dxdy).astype(np.float32)
dy = (gaussian(np.random.uniform(-1, 1, size=shape_size),
sigma, mode='reflect', truncate=3) * mult_dxdy).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
x_et = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
if scale_back:
x_et = x_et * (xrange[1] - xrange[0]) + xrange[0]
if isinstance(xrange, tuple):
return np.clip(x_et, xrange[0], xrange[1])
else:
return x_et
| 30,754 | 31.036458 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/misc.py
|
import numpy as np
def quantile(sample: np.ndarray, p: float, type: int = 7,
sorted: bool = False, interpolate: bool = True) -> float:
"""
Estimate a desired quantile of a univariate distribution from a vector of samples
Parameters
----------
sample
A 1D vector of values
p
The desired quantile in (0,1)
type
The method for computing the quantile.
See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
sorted
Whether or not the vector is already sorted into ascending order
interpolate
Whether to interpolate the desired quantile.
Returns
-------
An estimate of the quantile
"""
N = len(sample)
if N == 0:
raise ValueError("Cannot compute quantiles with zero samples.")
if len(sample.shape) != 1:
raise ValueError("Quantile estimation only supports vectors of univariate samples.")
if not 1/N <= p <= (N-1)/N:
raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
sorted_sample = sample if sorted else np.sort(sample)
if type == 6:
h = (N+1)*p
elif type == 7:
h = (N-1)*p + 1
elif type == 8:
h = (N+1/3)*p + 1/3
else:
raise ValueError("type must be an int with value 6, 7 or 8.")
h_floor = int(h)
quantile = sorted_sample[h_floor-1]
if h_floor != h and interpolate:
quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
return float(quantile)
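# Illustrative usage sketch (not part of the original module): with type=7 and
# interpolation the estimate matches numpy's default quantile method.
def _example_quantile():
    sample = np.random.normal(size=1000)
    q = quantile(sample, .95)  # type=7, interpolated
    assert np.isclose(q, np.quantile(sample, .95))
    return q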
| 1,556 | 28.377358 | 93 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/data.py
|
import numpy as np
import pandas as pd
from typing import Tuple, Union
class Bunch(dict):
"""
Container object for internal datasets
Dictionary-like object that exposes its keys as attributes.
"""
def __init__(self, **kwargs):
super().__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
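# Illustrative usage sketch (not part of the original module): keys of a Bunch
# are exposed as attributes.
def _example_bunch():
    b = Bunch(data=np.zeros(3), target_names=['normal', 'outlier'])
    assert b.data is b['data'] and b.target_names == ['normal', 'outlier']
    return b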
def sample_df(df: pd.DataFrame,
n: int):
""" Sample n instances from the dataframe df. """
    # sample with replacement only if more instances are requested than are available
    replace = n > df.shape[0]
return df.sample(n=n, replace=replace)
def create_outlier_batch(data: np.ndarray,
target: np.ndarray,
n_samples: int,
perc_outlier: int) -> Union[Bunch, Tuple[np.ndarray, np.ndarray]]:
""" Create a batch with a defined percentage of outliers. """
# create df
data = pd.DataFrame(data=data)
data['target'] = target
# separate inlier and outlier data
normal = data[data['target'] == 0]
outlier = data[data['target'] == 1]
if n_samples == 1:
n_outlier = np.random.binomial(1, .01 * perc_outlier)
n_normal = 1 - n_outlier
else:
n_outlier = int(perc_outlier * .01 * n_samples)
n_normal = int((100 - perc_outlier) * .01 * n_samples)
# draw samples
batch_normal = sample_df(normal, n_normal)
batch_outlier = sample_df(outlier, n_outlier)
batch = pd.concat([batch_normal, batch_outlier])
batch = batch.sample(frac=1).reset_index(drop=True)
is_outlier = batch['target'].values
batch.drop(columns=['target'], inplace=True)
return Bunch(data=batch.values, target=is_outlier, target_names=['normal', 'outlier'])
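# Illustrative usage sketch (not part of the original module): the data shapes
# and outlier percentage are assumptions.
def _example_create_outlier_batch():
    data = np.random.rand(1000, 4)
    target = np.random.binomial(1, .2, 1000)  # 1 flags an outlier
    batch = create_outlier_batch(data, target, n_samples=100, perc_outlier=10)
    assert batch.data.shape == (100, 4) and int(batch.target.sum()) == 10
    return batch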
| 1,920 | 26.442857 | 91 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/sampling.py
|
import numpy as np
import random
def reservoir_sampling(X_ref: np.ndarray,
X: np.ndarray,
reservoir_size: int,
n: int) -> np.ndarray:
"""
Apply reservoir sampling.
Parameters
----------
X_ref
Current instances in reservoir.
X
Data to update reservoir with.
reservoir_size
Size of reservoir.
n
Number of total instances that have passed so far.
Returns
-------
Updated reservoir.
"""
if X.shape[0] + n <= reservoir_size:
return np.concatenate([X_ref, X], axis=0)
n_ref = X_ref.shape[0]
output_size = min(reservoir_size, n_ref + X.shape[0])
shape = (output_size,) + X.shape[1:]
X_reservoir = np.zeros(shape, dtype=X_ref.dtype)
X_reservoir[:n_ref] = X_ref
for item in X:
n += 1
if n_ref < reservoir_size:
X_reservoir[n_ref, :] = item
n_ref += 1
else:
r = int(random.random() * n)
if r < reservoir_size:
X_reservoir[r, :] = item
return X_reservoir
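# Illustrative usage sketch (not part of the original module): maintain a
# fixed-size reference set over a stream of batches. Sizes are assumptions.
def _example_reservoir_sampling():
    reservoir, n_seen = np.empty((0, 3)), 0
    for _ in range(10):
        batch = np.random.rand(50, 3)
        reservoir = reservoir_sampling(reservoir, batch, reservoir_size=100, n=n_seen)
        n_seen += len(batch)
    assert reservoir.shape == (100, 3)
    return reservoir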
| 1,126 | 24.044444 | 58 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/statstest.py
|
import numpy as np
from typing import Callable, Tuple, Union
def permutation_test(x: np.ndarray, y: np.ndarray, metric: Callable, n_permutations: int = 100,
**kwargs) -> Tuple[float, float, np.ndarray]:
"""
Apply a permutation test to samples x and y.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
n_permutations
Number of permutations used in the test.
metric
Distance metric used for the test. Defaults to Maximum Mean Discrepancy.
kwargs
Kwargs for the metric. For the default this includes for instance the kernel used.
Returns
-------
p-value obtained from the permutation test, the metric between the reference and test set \
and the metric values from the permutation test.
"""
n, k = x.shape[0], 0
dist = metric(x, y, **kwargs)
x_y = np.concatenate([x, y])
dist_permutations = np.zeros(n_permutations)
for _ in range(n_permutations):
np.random.shuffle(x_y)
x, y = x_y[:n], x_y[n:]
dist_permutation = metric(x, y, **kwargs)
dist_permutations[_] = dist_permutation
k += dist <= dist_permutation
return k / n_permutations, dist, dist_permutations
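# Illustrative usage sketch (not part of the original module): a simple
# mean-difference metric stands in for e.g. MMD; the data are assumptions.
def _example_permutation_test():
    def metric(x, y):
        return float(np.abs(x.mean() - y.mean()))
    x, y = np.random.randn(100, 2), np.random.randn(100, 2) + 1.
    p_val, dist, dist_permutations = permutation_test(x, y, metric, n_permutations=100)
    # p_val should be small since x and y come from shifted distributions
    return p_val, dist, dist_permutations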
def fdr(p_val: np.ndarray, q_val: float) -> Tuple[int, Union[float, np.ndarray]]:
"""
Checks the significance of univariate tests on each variable between 2 samples of
multivariate data via the False Discovery Rate (FDR) correction of the p-values.
Parameters
----------
p_val
p-values for each univariate test.
q_val
Acceptable q-value threshold.
Returns
-------
Whether any of the p-values are significant after the FDR correction \
and the max threshold value or array of potential thresholds if no p-values \
are significant.
"""
n = p_val.shape[0]
i = np.arange(n) + 1
p_sorted = np.sort(p_val)
q_threshold = q_val * i / n
below_threshold = p_sorted < q_threshold
try:
idx_threshold = np.where(below_threshold)[0].max()
except ValueError: # sorted p-values not below thresholds
return int(below_threshold.any()), q_threshold
return int(below_threshold.any()), q_threshold[idx_threshold]
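# Illustrative usage sketch (not part of the original module): the p-values are
# assumptions.
def _example_fdr():
    p_val = np.array([.001, .008, .039, .041, .27, .57, .9])
    drift, threshold = fdr(p_val, q_val=.05)
    assert drift == 1  # at least one p-value is significant after FDR correction
    return drift, threshold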
| 2,318 | 32.608696 | 95 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/_random.py
|
"""
This submodule contains utility functions to manage random number generator (RNG) seeds. It may change
depending on how we decide to handle randomisation in tests (and elsewhere) going forwards. See
https://github.com/SeldonIO/alibi-detect/issues/250.
"""
from contextlib import contextmanager
import random
import numpy as np
import os
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow
if has_tensorflow:
import tensorflow as tf
if has_pytorch:
import torch
# Init global seed
_ALIBI_SEED = None
def set_seed(seed: int):
"""
Sets the Python, NumPy, TensorFlow and PyTorch random seeds, and the PYTHONHASHSEED env variable.
Parameters
----------
seed
Value of the random seed to set.
"""
global _ALIBI_SEED
seed = max(seed, 0) # TODO: This is a fix to allow --randomly-seed=0 in setup.cfg. To be removed in future
_ALIBI_SEED = seed
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
if has_tensorflow:
tf.random.set_seed(seed)
if has_pytorch:
torch.manual_seed(seed)
def get_seed() -> int:
"""
Gets the seed set by :func:`set_seed`.
Example
-------
>>> from alibi_detect.utils._random import set_seed, get_seed
>>> set_seed(42)
>>> get_seed()
42
"""
if _ALIBI_SEED is not None:
return _ALIBI_SEED
else:
raise RuntimeError('`set_seed` must be called before `get_seed` can be called.')
@contextmanager
def fixed_seed(seed: int):
"""
A context manager to run with a requested random seed (applied to all the RNG's set by :func:`set_seed`).
Parameters
----------
seed
Value of the random seed to set in the isolated context.
Example
-------
.. code-block :: python
set_seed(0)
with fixed_seed(42):
dd = cd.LSDDDrift(X_ref) # seeds equal 42 here
p_val = dd.predict(X_h0)['data']['p_val']
# seeds equal 0 here
"""
orig_seed = get_seed()
set_seed(seed)
try:
yield
finally:
set_seed(orig_seed)
| 2,127 | 24.035294 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/frameworks.py
|
from .missing_optional_dependency import ERROR_TYPES
from typing import Optional, List, Dict, Iterable
from enum import Enum
class Framework(str, Enum):
PYTORCH = 'pytorch'
TENSORFLOW = 'tensorflow'
KEOPS = 'keops'
SKLEARN = 'sklearn'
try:
import tensorflow as tf # noqa
import tensorflow_probability as tfp # noqa
has_tensorflow = True
except ImportError:
has_tensorflow = False
try:
import torch # noqa
has_pytorch = True
except ImportError:
has_pytorch = False
try:
import pykeops # noqa
import torch # noqa
has_keops = True
except ImportError:
has_keops = False
# Map from backend name to boolean value indicating its presence
HAS_BACKEND = {
'tensorflow': has_tensorflow,
'pytorch': has_pytorch,
'sklearn': True,
'keops': has_keops,
}
def _iter_to_str(iterable: Iterable[str]) -> str:
""" Correctly format iterable of items to comma seperated sentence string."""
items = [f'`{option}`' for option in iterable]
last_item_str = f'{items[-1]}' if not items[:-1] else f' and {items[-1]}'
return ', '.join(items[:-1]) + last_item_str
class BackendValidator:
def __init__(self, backend_options: Dict[Optional[str], List[str]], construct_name: str):
"""Checks for required sets of backend options.
Takes a dictionary of backends plus extra dependencies and generates correct error messages if they are unmet.
Parameters
----------
backend_options
Dictionary from backend to list of dependencies that must be satisfied. The keys are the available options
for the user and the values should be a list of dependencies that are checked via the `HAS_BACKEND` map
defined in this module. An example of `backend_options` would be `{'tensorflow': ['tensorflow'], 'pytorch':
            ['pytorch'], None: []}`. This would mean `'tensorflow'`, `'pytorch'` or `None` are available backend
            options. If the user passes a different backend they will receive an error listing the correct backends. In
addition, if one of the dependencies in the `backend_option` values is missing for the specified backend
the validator will issue an error message telling the user what dependency bucket to install.
construct_name
Name of the object that has a set of backends we need to verify.
"""
self.backend_options = backend_options
self.construct_name = construct_name
def verify_backend(self, backend: str):
"""Verifies backend choice.
Verifies backend is implemented and that the correct dependencies are installed for the requested backend. If
the backend is not implemented or a dependency is missing then an error is issued.
Parameters
----------
backend
Choice of backend the user wishes to initialize the alibi-detect construct with. Must be one of the keys
in the `self.backend_options` dictionary.
Raises
------
NotImplementedError
If backend is not a member of `self.backend_options.keys()` a `NotImplementedError` is raised. Note `None`
is a valid choice of backend if it is set as a key on `self.backend_options.keys()`. If a backend is not
implemented for an alibi-detect object then it should not have a key on `self.backend_options`.
ImportError
If one of the dependencies in `self.backend_options[backend]` is missing then an ImportError will be thrown
including a message informing the user how to install.
"""
if backend not in self.backend_options:
self._raise_implementation_error(backend)
dependencies = self.backend_options[backend]
missing_deps = []
for dependency in dependencies:
if not HAS_BACKEND[dependency]:
missing_deps.append(dependency)
if missing_deps:
self._raise_import_error(missing_deps, backend)
def _raise_import_error(self, missing_deps: List[str], backend: str):
"""Raises import error if backend choice has missing dependency."""
optional_dependencies = list(ERROR_TYPES[missing_dep] for missing_dep in missing_deps)
optional_dependencies.sort()
missing_deps_str = _iter_to_str(missing_deps)
error_msg = (f'{missing_deps_str} not installed. Cannot initialize and run {self.construct_name} '
f'with {backend} backend.')
pip_msg = '' if not optional_dependencies else \
(f'The necessary missing dependencies can be installed using '
f'`pip install alibi-detect[{" ".join(optional_dependencies)}]`.')
raise ImportError(f'{error_msg} {pip_msg}')
def _raise_implementation_error(self, backend: str):
"""Raises NotImplementedError error if backend choice is not implemented."""
backend_list = _iter_to_str(self.backend_options.keys())
raise NotImplementedError(f"{backend} backend not implemented. Use one of {backend_list} instead.")
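# Illustrative usage sketch (not part of the original module): a hypothetical
# construct supporting tensorflow and pytorch backends validates the user's
# choice before initialization; verify_backend raises if dependencies are missing.
def _demo_backend_validation(backend: str = 'tensorflow') -> None:
    options: Dict[Optional[str], List[str]] = {
        'tensorflow': ['tensorflow'], 'pytorch': ['pytorch'], None: [],
    }
    BackendValidator(backend_options=options, construct_name='MyDetector').verify_backend(backend)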
| 5,128 | 40.699187 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/discretizer.py
|
import numpy as np
from typing import Dict, Callable, List
class Discretizer(object):
def __init__(self, data: np.ndarray, categorical_features: List[int], feature_names: List[str],
percentiles: List[int] = [25, 50, 75]) -> None:
"""
Initialize the discretizer.
Parameters
----------
data
Data to discretize
categorical_features
List of indices corresponding to the categorical columns. These features will not be discretized.
The other features will be considered continuous and therefore discretized.
feature_names
List with feature names
percentiles
Percentiles used for discretization
"""
self.to_discretize = ([x for x in range(data.shape[1]) if x not in categorical_features])
self.percentiles = percentiles
bins = self.bins(data)
bins = [np.unique(x) for x in bins]
self.names: Dict[int, list] = {}
self.lambdas: Dict[int, Callable] = {}
for feature, qts in zip(self.to_discretize, bins):
# get nb of borders (nb of bins - 1) and the feature name
n_bins = qts.shape[0]
name = feature_names[feature]
# create names for bins of discretized features
self.names[feature] = ['%s <= %.2f' % (name, qts[0])]
for i in range(n_bins - 1):
self.names[feature].append('%.2f < %s <= %.2f' % (qts[i], name, qts[i + 1]))
self.names[feature].append('%s > %.2f' % (name, qts[n_bins - 1]))
            self.lambdas[feature] = lambda x, qts=qts: np.searchsorted(qts, x)
def bins(self, data: np.ndarray) -> List[np.ndarray]:
"""
Parameters
----------
data
Data to discretize
Returns
-------
List with bin values for each feature that is discretized.
"""
bins = []
for feature in self.to_discretize:
qts = np.array(np.percentile(data[:, feature], self.percentiles))
bins.append(qts)
return bins
def discretize(self, data: np.ndarray) -> np.ndarray:
"""
Parameters
----------
data
Data to discretize
Returns
-------
Discretized version of data with the same dimension.
"""
data_disc = data.copy()
for feature in self.lambdas:
if len(data.shape) == 1:
data_disc[feature] = int(self.lambdas[feature](data_disc[feature]))
else:
data_disc[:, feature] = self.lambdas[feature](data_disc[:, feature]).astype(int)
return data_disc
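# Illustrative usage sketch (not part of the original module): discretize the
# continuous columns of a random dataset into quartile bins; column 1 is
# treated as categorical and left untouched. Names and sizes are demo values.
def _demo_discretizer() -> np.ndarray:
    X = np.random.rand(100, 3)
    disc = Discretizer(X, categorical_features=[1], feature_names=['f0', 'f1', 'f2'])
    print(disc.names[0])  # human-readable bin labels for column 0
    return disc.discretize(X)  # columns 0 and 2 now hold bin indices 0..3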
| 2,719 | 33 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/metrics.py
|
import numpy as np
def accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> float:
y_true_class = np.argmax(y_true, axis=1) if len(y_true.shape) > 1 else np.round(y_true)
y_pred_class = np.argmax(y_pred, axis=1) if len(y_pred.shape) > 1 else np.round(y_pred)
return (y_true_class == y_pred_class).mean()
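# Illustrative usage sketch (not part of the original module): accuracy accepts
# one-hot/probability predictions as well as flat label or probability vectors.
def _demo_accuracy() -> float:
    y_true = np.array([0, 1, 1, 0])
    y_pred = np.array([[.9, .1], [.2, .8], [.6, .4], [.7, .3]])
    return accuracy(y_true, y_pred)  # 0.75: three of four predictions correct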
| 317 | 38.75 | 91 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/visualize.py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
from typing import Dict, Union
import warnings
def plot_instance_score(preds: Dict,
target: np.ndarray,
labels: np.ndarray,
threshold: float,
ylim: tuple = (None, None)) -> None:
"""
Scatter plot of a batch of outlier or adversarial scores compared to the threshold.
Parameters
----------
preds
Dictionary returned by predictions of an outlier or adversarial detector.
target
Ground truth.
labels
List with names of classification labels.
threshold
Threshold used to classify outliers or adversarial instances.
ylim
Min and max y-axis values.
"""
scores = preds['data']['instance_score']
df = pd.DataFrame(dict(idx=np.arange(len(scores)), score=scores, label=target))
groups = df.groupby('label')
fig, ax = plt.subplots()
for name, group in groups:
ax.plot(group.idx, group.score, marker='o', linestyle='', ms=6, label=labels[name])
plt.plot(np.arange(len(scores)), np.ones(len(scores)) * threshold, color='g', label='Threshold')
plt.ylim(ylim)
plt.xlabel('Number of Instances')
plt.ylabel('Instance Level Score')
ax.legend()
plt.show()
def plot_feature_outlier_image(od_preds: Dict,
X: np.ndarray,
X_recon: np.ndarray = None,
instance_ids: list = None,
max_instances: int = 5,
outliers_only: bool = False,
n_channels: int = 3,
figsize: tuple = (20, 20)) -> None:
"""
Plot feature (pixel) wise outlier scores for images.
Parameters
----------
od_preds
Output of an outlier detector's prediction.
X
Batch of instances to apply outlier detection to.
X_recon
Reconstructed instances of X.
instance_ids
List with indices of instances to display.
max_instances
Maximum number of instances to display.
outliers_only
Whether to only show outliers or not.
n_channels
Number of channels of the images.
figsize
Tuple for the figure size.
"""
scores = od_preds['data']['feature_score']
if outliers_only and instance_ids is None:
instance_ids = list(np.where(od_preds['data']['is_outlier'])[0])
elif instance_ids is None:
instance_ids = list(range(len(od_preds['data']['is_outlier'])))
n_instances = min(max_instances, len(instance_ids))
instance_ids = instance_ids[:n_instances]
if outliers_only and n_instances == 0:
warnings.warn('No outliers found!', UserWarning)
return
n_cols = 2
if n_channels == 3:
n_cols += 2
if X_recon is not None:
n_cols += 1
fig, axes = plt.subplots(nrows=n_instances, ncols=n_cols, figsize=figsize)
n_subplot = 1
for i in range(n_instances):
idx = instance_ids[i]
X_outlier = X[idx]
plt.subplot(n_instances, n_cols, n_subplot)
plt.axis('off')
if i == 0:
plt.title('Original')
plt.imshow(X_outlier)
n_subplot += 1
if X_recon is not None:
plt.subplot(n_instances, n_cols, n_subplot)
plt.axis('off')
if i == 0:
plt.title('Reconstruction')
plt.imshow(X_recon[idx])
n_subplot += 1
plt.subplot(n_instances, n_cols, n_subplot)
plt.axis('off')
if i == 0:
plt.title('Outlier Score Channel 0')
plt.imshow(scores[idx][:, :, 0])
n_subplot += 1
if n_channels == 3:
plt.subplot(n_instances, n_cols, n_subplot)
plt.axis('off')
if i == 0:
plt.title('Outlier Score Channel 1')
plt.imshow(scores[idx][:, :, 1])
n_subplot += 1
plt.subplot(n_instances, n_cols, n_subplot)
plt.axis('off')
if i == 0:
plt.title('Outlier Score Channel 2')
plt.imshow(scores[idx][:, :, 2])
n_subplot += 1
plt.show()
def plot_feature_outlier_tabular(od_preds: Dict,
X: np.ndarray,
X_recon: np.ndarray = None,
threshold: float = None,
instance_ids: list = None,
max_instances: int = 5,
top_n: int = int(1e12),
outliers_only: bool = False,
feature_names: list = None,
width: float = .2,
figsize: tuple = (20, 10)) -> None:
"""
Plot feature wise outlier scores for tabular data.
Parameters
----------
od_preds
Output of an outlier detector's prediction.
X
Batch of instances to apply outlier detection to.
X_recon
Reconstructed instances of X.
threshold
Threshold used for outlier score to determine outliers.
instance_ids
List with indices of instances to display.
max_instances
Maximum number of instances to display.
top_n
        Maximum number of features to display, ordered by outlier score.
outliers_only
Whether to only show outliers or not.
feature_names
List with feature names.
width
Column width for bar charts.
figsize
Tuple for the figure size.
"""
if outliers_only and instance_ids is None:
instance_ids = list(np.where(od_preds['data']['is_outlier'])[0])
elif instance_ids is None:
instance_ids = list(range(len(od_preds['data']['is_outlier'])))
n_instances = min(max_instances, len(instance_ids))
instance_ids = instance_ids[:n_instances]
n_features = X.shape[1]
n_cols = 2
labels_values = ['Original']
if X_recon is not None:
labels_values += ['Reconstructed']
labels_scores = ['Outlier Score']
if threshold is not None:
labels_scores = ['Threshold'] + labels_scores
fig, axes = plt.subplots(nrows=n_instances, ncols=n_cols, figsize=figsize)
n_subplot = 1
for i in range(n_instances):
idx = instance_ids[i]
fscore = od_preds['data']['feature_score'][idx]
if top_n >= n_features:
keep_cols = np.arange(n_features)
else:
keep_cols = np.argsort(fscore)[::-1][:top_n]
fscore = fscore[keep_cols]
X_idx = X[idx][keep_cols]
ticks = np.arange(len(keep_cols))
plt.subplot(n_instances, n_cols, n_subplot)
if X_recon is not None:
X_recon_idx = X_recon[idx][keep_cols]
plt.bar(ticks - width, X_idx, width=width, color='b', align='center')
plt.bar(ticks, X_recon_idx, width=width, color='g', align='center')
else:
plt.bar(ticks, X_idx, width=width, color='b', align='center')
if feature_names is not None:
plt.xticks(ticks=ticks, labels=list(np.array(feature_names)[keep_cols]), rotation=45)
plt.title('Feature Values')
plt.xlabel('Features')
plt.ylabel('Feature Values')
plt.legend(labels_values)
n_subplot += 1
plt.subplot(n_instances, n_cols, n_subplot)
plt.bar(ticks, fscore)
if threshold is not None:
plt.plot(np.ones(len(ticks)) * threshold, 'r')
if feature_names is not None:
plt.xticks(ticks=ticks, labels=list(np.array(feature_names)[keep_cols]), rotation=45)
plt.title('Feature Level Outlier Score')
plt.xlabel('Features')
plt.ylabel('Outlier Score')
plt.legend(labels_scores)
n_subplot += 1
plt.tight_layout()
plt.show()
def plot_feature_outlier_ts(od_preds: Dict,
X: np.ndarray,
threshold: Union[float, int, list, np.ndarray],
window: tuple = None,
t: np.ndarray = None,
X_orig: np.ndarray = None,
width: float = .2,
figsize: tuple = (20, 8),
ylim: tuple = (None, None)
) -> None:
"""
Plot feature wise outlier scores for time series data.
Parameters
----------
od_preds
Output of an outlier detector's prediction.
X
Time series to apply outlier detection to.
threshold
Threshold used to classify outliers or adversarial instances.
window
Start and end timestep to plot.
t
Timesteps.
X_orig
Optional original time series without outliers.
width
Column width for bar charts.
figsize
Tuple for the figure size.
ylim
Min and max y-axis values for the outlier scores.
"""
if window is not None:
t_start, t_end = window
else:
t_start, t_end = 0, X.shape[0]
if len(X.shape) == 1:
n_features = 1
else:
n_features = X.shape[1]
if t is None:
t = np.arange(X.shape[0])
ticks = t[t_start:t_end]
# check if feature level scores available
if isinstance(od_preds['data']['feature_score'], np.ndarray):
scores = od_preds['data']['feature_score']
else:
scores = od_preds['data']['instance_score'].reshape(-1, 1)
n_cols = 2
fig, axes = plt.subplots(nrows=n_features, ncols=n_cols, figsize=figsize)
n_subplot = 1
for i in range(n_features):
plt.subplot(n_features, n_cols, n_subplot)
if i == 0 and X_orig is not None:
plt.title('Original vs. perturbed data')
elif i == 0:
plt.title('Data')
plt.plot(ticks, X[t_start:t_end, i], marker='*', markersize=4, label='Data with Outliers')
if X_orig is not None:
plt.plot(ticks, X_orig[t_start:t_end, i], marker='o', markersize=4, label='Data without Outliers')
plt.xlabel('Time')
plt.ylabel('Observation')
plt.legend()
n_subplot += 1
plt.subplot(n_features, n_cols, n_subplot)
if i == 0:
plt.title('Outlier Score per Timestep')
plt.bar(ticks, scores[t_start:t_end, i], width=width, color='g', align='center', label='Outlier Score')
if isinstance(threshold, (float, int)):
thr = threshold
else:
thr = threshold[i]
plt.plot(ticks, np.ones(len(ticks)) * thr, 'r', label='Threshold')
plt.xlabel('Time')
plt.ylabel('Outlier Score')
plt.legend()
plt.ylim(ylim)
n_subplot += 1
plt.show()
def plot_roc(roc_data: Dict[str, Dict[str, np.ndarray]], figsize: tuple = (10, 5)) -> None:
"""
Plot ROC curve.
Parameters
----------
roc_data
Dictionary with as key the label to show in the legend and as value another dictionary with as
keys `scores` and `labels` with respectively the outlier scores and outlier labels.
figsize
Figure size.
"""
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=figsize)
for k, v in roc_data.items():
fpr, tpr, thresholds = roc_curve(v['labels'], v['scores'])
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='{}: AUC={:.4f}'.format(k, roc_auc))
plt.plot([0, 1], [0, 1], color='black', lw=1, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('{}'.format('ROC curve'))
plt.legend(loc="lower right", ncol=1)
plt.grid()
plt.show()
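# Illustrative usage sketch (not part of the original module): plot ROC curves
# for two hypothetical detectors from their outlier scores and ground truth labels.
def _demo_plot_roc() -> None:
    labels = np.random.randint(0, 2, 200)
    roc_data = {
        'detector_a': {'scores': labels + np.random.randn(200), 'labels': labels},
        'detector_b': {'scores': np.random.randn(200), 'labels': labels},
    }
    plot_roc(roc_data)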
| 11,959 | 31.588556 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/distance.py
|
import numpy as np
from sklearn.manifold import MDS
from typing import Any, Dict, Tuple
def norm(x: np.ndarray, p: int) -> np.ndarray:
"""
Compute p-norm across the features of a batch of instances.
Parameters
----------
x
Batch of instances of shape [N, features].
p
Power of the norm.
Returns
-------
Array where p-norm is applied to the features.
"""
return (x ** p).sum(axis=1) ** (1 / p)
def pairwise_distance(x: np.ndarray, y: np.ndarray, p: int = 2) -> np.ndarray:
"""
Compute pairwise distance between 2 samples.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
p
Power of the norm used to compute the distance.
Returns
-------
[Nx, Ny] matrix with pairwise distances.
"""
assert len(x.shape) == len(y.shape) and len(x.shape) == 2 and x.shape[-1] == y.shape[-1]
diff = x.reshape(x.shape + (1,)) - y.T.reshape((1,) + y.T.shape) # [Nx,F,1]-[1,F,Ny]=[Nx,F,Ny]
dist = norm(diff, p) # [Nx,Ny]
return dist
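# Illustrative usage sketch (not part of the original module): Euclidean (p=2)
# pairwise distances between two small batches; the self-distance matrix of a
# batch with itself has a zero diagonal.
def _demo_pairwise_distance() -> np.ndarray:
    x = np.random.rand(4, 3).astype('float32')
    y = np.random.rand(6, 3).astype('float32')
    return pairwise_distance(x, y)  # shape [4, 6]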
def cityblock_batch(X: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Calculate the L1 distances between a batch of arrays X and an array of the same shape y.
Parameters
----------
X
Batch of arrays to calculate the distances from
y
Array to calculate the distance to
Returns
-------
Array of distances from each array in X to y
"""
X_dim = len(X.shape)
y_dim = len(y.shape)
if X_dim == y_dim:
assert y.shape[0] == 1, 'y must have batch size equal to 1'
else:
assert X.shape[1:] == y.shape, 'X and y must have matching shapes'
return np.abs(X - y).sum(axis=tuple(np.arange(1, X_dim))).reshape(X.shape[0], -1)
def mvdm(X: np.ndarray, y: np.ndarray, cat_vars: dict, alpha: int = 1) -> Dict[Any, np.ndarray]:
"""
Calculate the pair-wise distances between categories of a categorical variable using
the Modified Value Difference Measure based on Cost et al (1993).
https://link.springer.com/article/10.1023/A:1022664626993
Parameters
----------
X
Batch of arrays.
y
Batch of labels or predictions.
cat_vars
Dict with as keys the categorical columns and as optional values
the number of categories per categorical variable.
alpha
Power of absolute difference between conditional probabilities.
Returns
-------
Dict with as keys the categorical columns and as values the pairwise distance matrix for the variable.
"""
# TODO: handle triangular inequality
# infer number of categories per categorical variable
n_y = len(np.unique(y))
cat_cols = list(cat_vars.keys())
for col in cat_cols:
if cat_vars[col] is not None:
continue
cat_vars[col] = len(np.unique(X[:, col]))
# conditional probabilities and pairwise distance matrix
d_pair = {}
for col, n_cat in cat_vars.items():
d_pair_col = np.zeros([n_cat, n_cat])
p_cond_col = np.zeros([n_cat, n_y])
for i in range(n_cat):
idx = np.where(X[:, col] == i)[0]
for i_y in range(n_y):
p_cond_col[i, i_y] = np.sum(y[idx] == i_y) / (y[idx].shape[0] + 1e-12)
for i in range(n_cat):
j = 0
while j < i: # symmetrical matrix
d_pair_col[i, j] = np.sum(np.abs(p_cond_col[i, :] - p_cond_col[j, :]) ** alpha)
j += 1
d_pair_col += d_pair_col.T
d_pair[col] = d_pair_col
return d_pair
def abdm(X: np.ndarray, cat_vars: dict, cat_vars_bin: dict = dict()) -> dict:
"""
Calculate the pair-wise distances between categories of a categorical variable using
the Association-Based Distance Metric based on Le et al (2005).
http://www.jaist.ac.jp/~bao/papers/N26.pdf
Parameters
----------
X
Batch of arrays.
cat_vars
Dict with as keys the categorical columns and as optional values
the number of categories per categorical variable.
cat_vars_bin
Dict with as keys the binned numerical columns and as optional values
the number of bins per variable.
Returns
-------
Dict with as keys the categorical columns and as values the pairwise distance matrix for the variable.
"""
# TODO: handle triangular inequality
# ensure numerical stability
eps = 1e-12
# infer number of categories per categorical variable
cat_cols = list(cat_vars.keys())
for col in cat_cols:
if cat_vars[col] is not None:
continue
cat_vars[col] = len(np.unique(X[:, col]))
# combine dict for categorical with binned features
cat_vars_combined = {**cat_vars, **cat_vars_bin}
d_pair: Dict = {}
X_cat_eq: Dict = {}
for col, n_cat in cat_vars.items():
X_cat_eq[col] = []
for i in range(n_cat): # for each category in categorical variable, store instances of each category
idx = np.where(X[:, col] == i)[0]
X_cat_eq[col].append(X[idx, :])
# conditional probabilities, also use the binned numerical features
p_cond = []
for col_t, n_cat_t in cat_vars_combined.items():
if col == col_t:
continue
p_cond_t = np.zeros([n_cat_t, n_cat])
for i in range(n_cat_t):
for j, X_cat_j in enumerate(X_cat_eq[col]):
idx = np.where(X_cat_j[:, col_t] == i)[0]
p_cond_t[i, j] = len(idx) / (X_cat_j.shape[0] + eps)
p_cond.append(p_cond_t)
# pairwise distance matrix
d_pair_col = np.zeros([n_cat, n_cat])
for i in range(n_cat):
j = 0
while j < i:
d_ij_tmp = 0
for p in p_cond: # loop over other categorical variables
for t in range(p.shape[0]): # loop over categories of each categorical variable
a, b = p[t, i], p[t, j]
d_ij_t = a * np.log((a + eps) / (b + eps)) + b * np.log((b + eps) / (a + eps)) # KL divergence
d_ij_tmp += d_ij_t
d_pair_col[i, j] = d_ij_tmp
j += 1
d_pair_col += d_pair_col.T
d_pair[col] = d_pair_col
return d_pair
def multidim_scaling(d_pair: dict,
n_components: int = 2,
use_metric: bool = True,
standardize_cat_vars: bool = True,
feature_range: tuple = None,
smooth: float = 1.,
center: bool = True,
update_feature_range: bool = True) -> Tuple[dict, tuple]:
"""
Apply multidimensional scaling to pairwise distance matrices.
Parameters
----------
d_pair
Dict with as keys the column index of the categorical variables and as values
a pairwise distance matrix for the categories of the variable.
n_components
Number of dimensions in which to immerse the dissimilarities.
use_metric
If True, perform metric MDS; otherwise, perform nonmetric MDS.
standardize_cat_vars
Standardize numerical values of categorical variables if True.
feature_range
Tuple with min and max ranges to allow for perturbed instances. Min and max ranges can be floats or
numpy arrays with dimension (1 x nb of features) for feature-wise ranges.
smooth
        Smoothing exponent between 0 and 1 for the distances. Lower values of `smooth` will smooth the difference in
        distance metric between different features.
center
Whether to center the scaled distance measures. If False, the min distance for each feature
except for the feature with the highest raw max distance will be the lower bound of the
feature range, but the upper bound will be below the max feature range.
update_feature_range
Update feature range with scaled values.
Returns
-------
Dict with multidimensional scaled version of pairwise distance matrices.
"""
d_abs = {}
d_min, d_max = 1e10, 0
for k, v in d_pair.items():
# distance smoothening
v **= smooth
# fit multi-dimensional scaler
mds = MDS(n_components=n_components, max_iter=5000, eps=1e-9, random_state=0, n_init=4,
dissimilarity="precomputed", metric=use_metric)
d_fit = mds.fit(v)
emb = d_fit.embedding_ # coordinates in embedding space
# use biggest single observation Frobenius norm as origin
origin = np.argsort(np.linalg.norm(emb, axis=1))[-1]
# calculate distance from origin for each category
d_origin = np.linalg.norm(emb - emb[origin].reshape(1, -1), axis=1)
# assign to category
d_abs[k] = d_origin
# update min and max distance
d_min_k, d_max_k = d_origin.min(), d_origin.max()
d_min = d_min_k if d_min_k < d_min else d_min
d_max = d_max_k if d_max_k > d_max else d_max
d_abs_scaled = {}
new_feature_range = tuple([f.copy() for f in feature_range])
for k, v in d_abs.items():
if standardize_cat_vars: # scale numerical values for the category
d_scaled = (v - v.mean()) / (v.std() + 1e-12)
else: # scale by overall min and max
try:
rng = (feature_range[0][0, k], feature_range[1][0, k])
except TypeError:
raise TypeError('Feature-wise min and max ranges need to be specified.')
d_scaled = (v - d_min) / (d_max - d_min) * (rng[1] - rng[0]) + rng[0]
if center: # center the numerical feature values between the min and max feature range
d_scaled -= .5 * (d_scaled.max() + d_scaled.min())
if update_feature_range:
new_feature_range[0][0, k] = d_scaled.min()
new_feature_range[1][0, k] = d_scaled.max()
d_abs_scaled[k] = d_scaled # scaled distance from the origin for each category
if update_feature_range:
feature_range = new_feature_range
return d_abs_scaled, feature_range
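# Illustrative usage sketch (not part of the original module): compute pairwise
# category distances with ABDM for a small categorical dataset, then embed them
# on a numerical scale via multidimensional scaling. The feature range of
# [-1, 1] per column is an arbitrary choice for the demo.
def _demo_abdm_multidim_scaling() -> Tuple[dict, tuple]:
    X = np.random.randint(0, 3, size=(200, 4))
    cat_vars = {i: None for i in range(4)}
    d_pair = abdm(X, cat_vars)
    rng = (np.full((1, 4), -1.), np.ones((1, 4)))
    return multidim_scaling(d_pair, feature_range=rng, standardize_cat_vars=False)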
| 10,244 | 35.589286 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/mapping.py
|
import numpy as np
from typing import Tuple, List
def ohe2ord_shape(shape: tuple, cat_vars: dict = None, is_ohe: bool = False) -> tuple:
"""
    Infer shape of instance if the categorical variables have ordinal instead of one-hot encoding.
Parameters
----------
shape
Instance shape, starting with batch dimension.
cat_vars
Dict with as keys the categorical columns and as values
the number of categories per categorical variable.
is_ohe
Whether instance is OHE.
Returns
-------
Tuple with shape of instance with ordinal encoding of categorical variables.
"""
if not is_ohe:
return shape
else:
n_cols_ohe = 0
for _, v in cat_vars.items():
n_cols_ohe += v - 1
shape = (shape[0],) + (shape[-1] - n_cols_ohe,)
return shape
def ord2num(data: np.ndarray, dist: dict) -> np.ndarray:
"""
    Transform categorical values into numerical values using a mapping.
Parameters
----------
data
Numpy array with the categorical data.
dist
Dict with as keys the categorical variables and as values
the numerical value for each category.
Returns
-------
Numpy array with transformed categorical data into numerical values.
"""
rng = data.shape[0]
X = data.astype(np.float32, copy=True)
for k, v in dist.items():
cat_col = X[:, k].copy()
cat_col = np.array([v[int(cat_col[i])] for i in range(rng)])
if type(X) == np.matrix:
X[:, k] = cat_col.reshape(-1, 1)
else:
X[:, k] = cat_col
return X.astype(np.float32)
def num2ord(data: np.ndarray, dist: dict) -> np.ndarray:
"""
    Transform numerical values back into categories using a mapping.
Parameters
----------
data
Numpy array with the numerical data.
dist
Dict with as keys the categorical variables and as values
the numerical value for each category.
Returns
-------
Numpy array with transformed numerical data into categories.
"""
X = data.copy()
for k, v in dist.items():
num_col = np.repeat(X[:, k].reshape(-1, 1), v.shape[0], axis=1)
diff = np.abs(num_col - v.reshape(1, -1))
X[:, k] = np.argmin(diff, axis=1)
return X
def ord2ohe(X_ord: np.ndarray, cat_vars_ord: dict) -> Tuple[np.ndarray, dict]:
"""
Convert ordinal to one-hot encoded variables.
Parameters
----------
X_ord
Data with mixture of ordinal encoded and numerical variables.
cat_vars_ord
Dict with as keys the categorical columns and as values
the number of categories per categorical variable.
Returns
-------
One-hot equivalent of ordinal encoded data and dict with categorical columns and number of categories.
"""
n, cols = X_ord.shape
ord_vars_keys = list(cat_vars_ord.keys())
X_list = []
c = 0
k = 0
cat_vars_ohe = {}
while c < cols:
if c in ord_vars_keys:
v = cat_vars_ord[c]
X_ohe_c = np.zeros((n, v), dtype=np.float32)
X_ohe_c[np.arange(n), X_ord[:, c].astype(int)] = 1.
cat_vars_ohe[k] = v
k += v
X_list.append(X_ohe_c)
else:
X_list.append(X_ord[:, c].reshape(n, 1))
k += 1
c += 1
X_ohe = np.concatenate(X_list, axis=1)
return X_ohe, cat_vars_ohe
def ohe2ord(X_ohe: np.ndarray, cat_vars_ohe: dict) -> Tuple[np.ndarray, dict]:
"""
Convert one-hot encoded variables to ordinal encodings.
Parameters
----------
X_ohe
Data with mixture of one-hot encoded and numerical variables.
cat_vars_ohe
Dict with as keys the first column index for each one-hot encoded categorical variable
and as values the number of categories per categorical variable.
Returns
-------
Ordinal equivalent of one-hot encoded data and dict with categorical columns and number of categories.
"""
n, cols = X_ohe.shape
ohe_vars_keys = list(cat_vars_ohe.keys())
X_list: List = []
c = 0
cat_vars_ord = {}
while c < cols:
if c in ohe_vars_keys:
v = cat_vars_ohe[c]
X_ohe_c = X_ohe[:, c:c + v]
assert int(np.sum(X_ohe_c, axis=1).sum()) == n
X_ord_c = np.argmax(X_ohe_c, axis=1)
cat_vars_ord[len(X_list)] = v
X_list.append(X_ord_c.reshape(n, 1))
c += v
continue
X_list.append(X_ohe[:, c].reshape(n, 1))
c += 1
X_ord = np.concatenate(X_list, axis=1)
return X_ord, cat_vars_ord
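# Illustrative usage sketch (not part of the original module): round-trip a
# mixed categorical/numerical instance through one-hot encoding and back.
def _demo_ohe_round_trip() -> None:
    X_ord = np.array([[1., .1, 0., .2]]).astype(np.float32)
    cat_vars_ord = {0: 2, 2: 2}  # columns 0 and 2 are categorical with 2 categories each
    X_ohe, cat_vars_ohe = ord2ohe(X_ord, cat_vars_ord)
    X_back, cat_vars_back = ohe2ord(X_ohe, cat_vars_ohe)
    assert (X_back == X_ord).all() and cat_vars_back == cat_vars_ord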
| 4,684 | 28.651899 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/saving/saving.py
|
# This submodule provides a link for the legacy alibi_detect.utils.saving location of load_detector and save_detector.
# TODO: Remove in future
from alibi_detect.saving import load_detector as _load_detector, save_detector as _save_detector
from alibi_detect.base import ConfigurableDetector, Detector
from typing import Union
import warnings
import os
def save_detector(
detector: Union[Detector, ConfigurableDetector],
filepath: Union[str, os.PathLike], legacy: bool = False) -> None:
"""
Save outlier, drift or adversarial detector.
Parameters
----------
detector
Detector object.
filepath
Save directory.
legacy
Whether to save in the legacy .dill format instead of via a config.toml file. Default is `False`.
"""
    warnings.warn("This function has been moved to alibi_detect.saving.save_detector(). "
                  "This legacy link will be removed in a future version", DeprecationWarning)
return _save_detector(detector, filepath, legacy)
def load_detector(filepath: Union[str, os.PathLike], **kwargs) -> Union[Detector, ConfigurableDetector]:
"""
Load outlier, drift or adversarial detector.
Parameters
----------
filepath
Load directory.
Returns
-------
Loaded outlier or adversarial detector object.
"""
    warnings.warn("This function has been moved to alibi_detect.saving.load_detector(). "
                  "This legacy link will be removed in a future version", DeprecationWarning)
return _load_detector(filepath, **kwargs)
| 1,576 | 32.553191 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/saving/__init__.py
|
from alibi_detect.utils.saving.saving import save_detector, load_detector # noqa
| 82 | 40.5 | 81 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_data.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.data import create_outlier_batch, Bunch
N, F = 1000, 4
X = np.random.rand(N, F)
y = np.zeros(N,)
y[:int(.5 * N)] = 1
n_samples = [50, 100]
perc_outlier = [10, 50]
tests = list(product(n_samples, perc_outlier))
n_tests = len(tests)
@pytest.fixture
def batch_params(request):
return tests[request.param]
@pytest.mark.parametrize('batch_params', list(range(n_tests)), indirect=True)
def test_outlier_batch(batch_params):
n_samples, perc_outlier = batch_params
batch = create_outlier_batch(X, y, n_samples, perc_outlier)
assert isinstance(batch, Bunch)
assert batch.data.shape == (n_samples, F)
assert batch.target.shape == (n_samples,)
assert batch.target_names == ['normal', 'outlier']
assert int(100 * batch.target.sum() / n_samples) == perc_outlier
| 875 | 26.375 | 77 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_distance.py
|
import numpy as np
from scipy.spatial.distance import cityblock
from itertools import product
import pytest
from alibi_detect.utils.distance import pairwise_distance, abdm, cityblock_batch, mvdm, multidim_scaling
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
tests_pairwise = list(product(n_features, n_instances))
n_tests_pairwise = len(tests_pairwise)
@pytest.fixture
def pairwise_params(request):
return tests_pairwise[request.param]
@pytest.mark.parametrize('pairwise_params', list(range(n_tests_pairwise)), indirect=True)
def test_pairwise(pairwise_params):
n_features, n_instances = pairwise_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = np.random.random(xshape).astype('float32')
y = np.random.random(yshape).astype('float32')
dist_xx = pairwise_distance(x, x)
dist_xy = pairwise_distance(x, y)
assert dist_xx.shape == (xshape[0], xshape[0])
assert dist_xy.shape == n_instances
assert dist_xx.trace() == 0.
dims = np.array([1, 10, 50])
shapes = list(product(dims, dims))
n_tests = len(dims) ** 2
@pytest.fixture
def random_matrix(request):
shape = shapes[request.param]
matrix = np.random.rand(*shape)
return matrix
@pytest.mark.parametrize('random_matrix', list(range(n_tests)), indirect=True)
def test_cityblock_batch(random_matrix):
X = random_matrix
y = X[np.random.choice(X.shape[0])]
batch_dists = cityblock_batch(X, y)
single_dists = np.array([cityblock(x, y) for x in X]).reshape(X.shape[0], -1)
assert np.allclose(batch_dists, single_dists)
n_cat = [2, 3, 4]
n_labels = [2, 3]
n_items = [20, 50, 100]
cols = [1, 5]
tests = list(product(n_cat, n_labels, n_items, cols))
n_tests = len(tests)
@pytest.fixture
def cats_and_labels(request):
cat, label, items, cols = tests[request.param]
cats = np.random.randint(0, cat, items * cols).reshape(-1, cols)
labels = np.random.randint(0, label, items).reshape(-1, 1)
return cats, labels
@pytest.mark.parametrize('cats_and_labels', list(range(n_tests)), indirect=True)
def test_abdm_mvdm(cats_and_labels):
X, y = cats_and_labels
n_cols = X.shape[1]
cat_vars = {i: None for i in range(n_cols)}
if n_cols > 1:
d_pair = abdm(X, cat_vars)
else:
d_pair = mvdm(X, y, cat_vars)
assert list(d_pair.keys()) == list(cat_vars.keys())
for k, v in d_pair.items():
assert v.shape == (cat_vars[k], cat_vars[k])
assert v.min() >= 0
Xy = (4, 2, 100, 5)
idx = np.where([t == Xy for t in tests])[0].item()
feature_range = ((np.ones((1, 5)) * -1).astype(np.float32),
(np.ones((1, 5))).astype(np.float32))
@pytest.mark.parametrize('cats_and_labels,rng,update_rng,center',
[(idx, feature_range, False, False),
(idx, feature_range, True, False),
(idx, feature_range, False, True),
(idx, feature_range, True, True)],
indirect=['cats_and_labels'])
def test_multidim_scaling(cats_and_labels, rng, update_rng, center):
# compute pairwise distance
X, y = cats_and_labels
n_cols = X.shape[1]
cat_vars = {i: None for i in range(n_cols)}
d_pair = abdm(X, cat_vars)
# apply multidimensional scaling
d_abs, new_rng = multidim_scaling(d_pair,
feature_range=rng,
update_feature_range=update_rng,
center=center
)
assert list(d_abs.keys()) == list(cat_vars.keys())
if update_rng:
assert (new_rng[0] != rng[0]).any()
assert (new_rng[1] != rng[1]).any()
else:
assert (new_rng[0] == rng[0]).all()
assert (new_rng[1] == rng[1]).all()
for k, v in d_abs.items():
assert v.shape[0] == d_pair[k].shape[0]
if center:
assert (v.max() + v.min()) - (rng[1][0, k] + rng[0][0, k]) < 1e-5
| 4,041 | 31.861789 | 104 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_saving_legacy.py
|
"""
Tests for saving/loading of detectors with legacy .dill state_dict. As legacy save/load functionality becomes
deprecated, these tests will be removed, and more tests will be added to test_saving.py.
"""
from alibi_detect.utils.missing_optional_dependency import MissingDependency
from functools import partial
import numpy as np
import pytest
from sklearn.model_selection import StratifiedKFold
from tempfile import TemporaryDirectory
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Callable
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift
from alibi_detect.cd.tensorflow import UAE, preprocess_drift
from alibi_detect.models.tensorflow.autoencoder import DecoderLSTM, EncoderLSTM
from alibi_detect.od import (IForest, LLR, Mahalanobis, OutlierAEGMM, OutlierVAE, OutlierVAEGMM,
OutlierProphet, SpectralResidual, OutlierSeq2Seq, OutlierAE)
from alibi_detect.saving import save_detector, load_detector
input_dim = 4
latent_dim = 2
n_gmm = 2
threshold = 10.
threshold_drift = .55
n_folds_drift = 5
samples = 6
seq_len = 10
p_val = .05
X_ref = np.random.rand(samples * input_dim).reshape(samples, input_dim)
X_ref_cat = np.tile(np.array([np.arange(samples)] * input_dim).T, (2, 1))
X_ref_mix = X_ref.copy()
X_ref_mix[:, 0] = np.tile(np.array(np.arange(samples // 2)), (1, 2)).T[:, 0]
n_permutations = 10
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
kwargs = {'encoder_net': encoder_net,
'decoder_net': decoder_net}
preprocess_fn = partial(preprocess_drift, model=UAE(encoder_net=encoder_net))
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
threshold_net = tf.keras.Sequential(
[
InputLayer(input_shape=(seq_len, latent_dim)),
Dense(5, activation=tf.nn.relu)
]
)
# define model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
detector = [
AdversarialAE(threshold=threshold,
model=model,
**kwargs),
ModelDistillation(threshold=threshold,
model=model,
distilled_model=model),
IForest(threshold=threshold),
LLR(threshold=threshold, model=model),
Mahalanobis(threshold=threshold),
OutlierAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
**kwargs),
OutlierVAE(threshold=threshold,
latent_dim=latent_dim,
samples=samples,
**kwargs),
OutlierAE(threshold=threshold,
**kwargs),
OutlierVAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
latent_dim=latent_dim,
samples=samples,
**kwargs),
SpectralResidual(threshold=threshold,
window_amp=10,
window_local=10),
OutlierSeq2Seq(input_dim,
seq_len,
threshold=threshold,
threshold_net=threshold_net,
latent_dim=latent_dim),
KSDrift(X_ref,
p_val=p_val,
preprocess_x_ref=False,
preprocess_fn=preprocess_fn),
MMDDrift(X_ref,
p_val=p_val,
preprocess_x_ref=False,
preprocess_fn=preprocess_fn,
configure_kernel_from_x_ref=True,
n_permutations=n_permutations),
ChiSquareDrift(X_ref_cat,
p_val=p_val,
preprocess_x_ref=True),
TabularDrift(X_ref_mix,
p_val=p_val,
categories_per_feature={0: None},
preprocess_x_ref=True),
ClassifierDrift(X_ref,
model=model,
p_val=p_val,
n_folds=n_folds_drift,
train_size=None)
]
if not isinstance(OutlierProphet, MissingDependency):
detector.append(
OutlierProphet(threshold=.7,
growth='logistic')
)
n_tests = len(detector)
@pytest.fixture
def select_detector(request):
return detector[request.param]
@pytest.mark.parametrize('select_detector', list(range(n_tests)), indirect=True)
def test_save_load(select_detector):
det = select_detector
det_name = det.meta['name']
with TemporaryDirectory() as temp_dir:
temp_dir += '/'
save_detector(det, temp_dir, legacy=True)
det_load = load_detector(temp_dir)
det_load_name = det_load.meta['name']
assert det_load_name == det_name
if not type(det_load) in [
OutlierProphet, ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift
]:
assert det_load.threshold == det.threshold == threshold
if type(det_load) in [OutlierVAE, OutlierVAEGMM]:
assert det_load.samples == det.samples == samples
if type(det_load) == AdversarialAE or type(det_load) == ModelDistillation:
for layer in det_load.model.layers:
assert not layer.trainable
if type(det_load) == OutlierAEGMM:
assert isinstance(det_load.aegmm.encoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.aegmm, tf.keras.Model)
assert det_load.aegmm.n_gmm == n_gmm
elif type(det_load) == OutlierVAEGMM:
assert isinstance(det_load.vaegmm.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.vaegmm, tf.keras.Model)
assert det_load.vaegmm.latent_dim == latent_dim
assert det_load.vaegmm.n_gmm == n_gmm
elif type(det_load) in [AdversarialAE, OutlierAE]:
assert isinstance(det_load.ae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae, tf.keras.Model)
elif type(det_load) == ModelDistillation:
assert isinstance(det_load.model, tf.keras.Sequential) or isinstance(det_load.model, tf.keras.Model)
assert (isinstance(det_load.distilled_model, tf.keras.Sequential) or
isinstance(det_load.distilled_model, tf.keras.Model))
elif type(det_load) == OutlierVAE:
assert isinstance(det_load.vae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae, tf.keras.Model)
assert det_load.vae.latent_dim == latent_dim
elif type(det_load) == Mahalanobis:
assert det_load.clip is None
assert det_load.mean == det_load.C == det_load.n == 0
assert det_load.meta['detector_type'] == 'outlier'
assert det_load.meta['online']
elif type(det_load) == OutlierProphet:
assert det_load.model.interval_width == .7
assert det_load.model.growth == 'logistic'
assert det_load.meta['data_type'] == 'time-series'
elif type(det_load) == SpectralResidual:
assert det_load.window_amp == 10
assert det_load.window_local == 10
elif type(det_load) == OutlierSeq2Seq:
assert isinstance(det_load.seq2seq, tf.keras.Model)
assert isinstance(det_load.seq2seq.threshold_net, tf.keras.Sequential)
assert isinstance(det_load.seq2seq.encoder, EncoderLSTM)
assert isinstance(det_load.seq2seq.decoder, DecoderLSTM)
assert det_load.latent_dim == latent_dim
assert det_load.threshold == threshold
assert det_load.shape == (-1, seq_len, input_dim)
elif type(det_load) == KSDrift:
assert det_load.n_features == latent_dim
assert det_load.p_val == p_val
assert (det_load.x_ref == X_ref).all()
assert isinstance(det_load.preprocess_fn, Callable)
assert det_load.preprocess_fn.func.__name__ == 'preprocess_drift'
elif type(det_load) in [ChiSquareDrift, TabularDrift]:
assert isinstance(det_load.x_ref_categories, dict)
assert det_load.p_val == p_val
x = X_ref_cat.copy() if isinstance(det_load, ChiSquareDrift) else X_ref_mix.copy()
assert (det_load.x_ref == x).all()
elif type(det_load) == MMDDrift:
assert not det_load._detector.infer_sigma
assert det_load._detector.n_permutations == n_permutations
assert det_load._detector.p_val == p_val
assert (det_load._detector.x_ref == X_ref).all()
assert isinstance(det_load._detector.preprocess_fn, Callable)
assert det_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
elif type(det_load) == ClassifierDrift:
assert det_load._detector.p_val == p_val
assert (det_load._detector.x_ref == X_ref).all()
assert isinstance(det_load._detector.skf, StratifiedKFold)
assert isinstance(det_load._detector.train_kwargs, dict)
assert isinstance(det_load._detector.model, tf.keras.Model)
elif type(det_load) == LLR:
assert isinstance(det_load.dist_s, tf.keras.Model)
assert isinstance(det_load.dist_b, tf.keras.Model)
assert not det_load.sequential
assert not det_load.has_log_prob
| 10,367 | 40.806452 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/conftest.py
|
import pytest
@pytest.fixture
def seed(pytestconfig):
"""
Returns the random seed set by pytest-randomly.
"""
return pytestconfig.getoption("randomly_seed")
| 175 | 16.6 | 51 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_mapping.py
|
import numpy as np
import alibi_detect.utils.mapping as mp
X_ohe = np.array([[0, 1, 0.1, 1, 0, 0.2]]).astype(np.float32)
shape_ohe = X_ohe.shape
cat_vars_ohe = {0: 2, 3: 2}
is_ohe = True
X_ord = np.array([[1., 0.1, 0., 0.2]]).astype(np.float32)
shape_ord = X_ord.shape
cat_vars_ord = {0: 2, 2: 2}
dist = {0: np.array([.3, .4]),
2: np.array([.5, .6, .7])}
X_num = np.array([[.4, .1, .5, .2]]).astype(np.float32)
def test_mapping_fn():
# ohe_to_ord_shape
assert mp.ohe2ord_shape(shape_ohe, cat_vars_ohe, is_ohe=True) == shape_ord
# ohe_to_ord
X_ohe_to_ord, cat_vars_ohe_to_ord = mp.ohe2ord(X_ohe, cat_vars_ohe)
assert (X_ohe_to_ord == X_ord).all() and cat_vars_ohe_to_ord == cat_vars_ord
# ord_to_ohe
X_ord_to_ohe, cat_vars_ord_to_ohe = mp.ord2ohe(X_ord, cat_vars_ord)
assert (X_ord_to_ohe == X_ohe).all() and cat_vars_ohe == cat_vars_ord_to_ohe
# ord_to_num
X_ord_to_num = mp.ord2num(X_ord, dist)
assert (X_num == X_ord_to_num).all()
# num_to_ord
X_num_to_ord = mp.num2ord(X_num, dist)
assert (X_ord == X_num_to_ord).all()
| 1,095 | 28.621622 | 80 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_sampling.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.sampling import reservoir_sampling
n_X_ref = [5, 10, 100]
n_X = [2, 5, 100]
reservoir_size = [10, 500]
n = [100, 1000]
n_features = 5
tests_sampling = list(product(n_X_ref, n_X, reservoir_size, n))
n_tests = len(tests_sampling)
@pytest.fixture
def update_sampling(request):
return tests_sampling[request.param]
@pytest.mark.parametrize('update_sampling', list(range(n_tests)), indirect=True)
def test_reservoir_sampling(update_sampling):
n_X_ref, n_X, reservoir_size, n = update_sampling
if n_X_ref > reservoir_size:
return
X_ref = np.random.rand(n_X_ref * n_features).reshape(n_X_ref, n_features)
X = np.random.rand(n_X * n_features).reshape(n_X, n_features)
X_reservoir = reservoir_sampling(X_ref, X, reservoir_size, n)
n_reservoir = X_reservoir.shape[0]
assert n_reservoir <= reservoir_size
if n_reservoir < reservoir_size:
assert n_reservoir == n_X_ref + n_X
assert (X_reservoir[:n_X_ref] == X_ref).all()
assert (X_reservoir[-n_X:] == X).all()
| 1,108 | 31.617647 | 80 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_discretize.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
| 1,195 | 30.473684 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_random.py
|
from alibi_detect.utils._random import set_seed, get_seed, fixed_seed
import numpy as np
import tensorflow as tf
import torch
def test_set_get_seed(seed):
"""
    Tests the set_seed and get_seed functions.
"""
# Check initial seed within test is the one set by pytest-randomly
current_seed = get_seed()
assert current_seed == seed
# Set another seed and check
new_seed = seed + 42
set_seed(new_seed)
current_seed = get_seed()
assert current_seed == new_seed
def test_fixed_seed(seed):
"""
Tests the fixed_seed context manager.
"""
n = 5 # Length of random number sequences
nums0 = []
tmp_seed = seed + 42
with fixed_seed(tmp_seed):
# Generate a sequence of random numbers
for i in range(n):
nums0.append(np.random.normal([1]))
nums0.append(tf.random.normal([1]))
nums0.append(torch.normal(torch.tensor([1.0])))
# Check seed unchanged after RNG calls
assert get_seed() == tmp_seed
# Generate another sequence of random numbers with same seed, and check equal
nums1 = []
tmp_seed = seed + 42
with fixed_seed(tmp_seed):
for i in range(n):
nums1.append(np.random.normal([1]))
nums1.append(tf.random.normal([1]))
nums1.append(torch.normal(torch.tensor([1.0])))
assert nums0 == nums1
# Generate another sequence of random numbers with different seed, and check not equal
nums2 = []
tmp_seed = seed + 99
with fixed_seed(tmp_seed):
for i in range(n):
nums2.append(np.random.normal([1]))
nums2.append(tf.random.normal([1]))
nums2.append(torch.normal(torch.tensor([1.0])))
assert nums1 != nums2
# Check seeds were reset upon exit of context managers
assert get_seed() == seed
| 1,848 | 28.822581 | 90 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_backend_verify.py
|
import pytest
from alibi_detect.utils.missing_optional_dependency import ERROR_TYPES
from alibi_detect.utils.frameworks import BackendValidator, HAS_BACKEND
class TestBackendValidator:
"""Test the BackendValidator class."""
def setup_method(self):
# mock missing dependency error for non existent module
ERROR_TYPES['backend_B'] = 'optional-deps'
ERROR_TYPES['backend_E'] = 'backend_E'
ERROR_TYPES['backend_D'] = 'backend_D'
HAS_BACKEND['backend_A'] = True
HAS_BACKEND['backend_B'] = False
HAS_BACKEND['backend_C'] = True
HAS_BACKEND['backend_D'] = False
HAS_BACKEND['backend_E'] = False
def teardown_method(self):
# remove mock missing dependency error for other tests
del HAS_BACKEND['backend_A']
del HAS_BACKEND['backend_B']
del HAS_BACKEND['backend_C']
del HAS_BACKEND['backend_D']
del HAS_BACKEND['backend_E']
del ERROR_TYPES['backend_B']
del ERROR_TYPES['backend_E']
del ERROR_TYPES['backend_D']
def test_backend_verifier_error_msg(self):
"""Check correct error messages
Check correct error messages are issued when detectors are initialized with incorrect or missing dependencies.
"""
options = {
'backend_1': ['backend_A', 'backend_B'],
'backend_2': ['backend_C'],
'backend_3': ['backend_D', 'backend_E'],
}
# If backend not an option, ensure NotImplemented error is raised by BackendValidator
with pytest.raises(NotImplementedError) as err:
BackendValidator(backend_options=options, construct_name='test').verify_backend('test')
assert str(err.value) == ('test backend not implemented. Use one of `backend_1`, '
'`backend_2` and `backend_3` instead.')
# If backend is an option, but dependencies are missing ensure ImportError raised by BackendValidator
with pytest.raises(ImportError) as err:
BackendValidator(backend_options=options, construct_name='test').verify_backend('backend_1')
assert str(err.value) == ('`backend_B` not installed. Cannot initialize and run test with backend_1 backend.'
' The necessary missing dependencies can be installed using '
'`pip install alibi-detect[optional-deps]`.')
# If backend is an option, and dependencies are met no errors are raised
BackendValidator(backend_options=options, construct_name='test').verify_backend('backend_2')
# If backend is an option, but multiple dependencies are missing ensure ImportError raised by BackendValidator
# including error message listing missing dependencies.
with pytest.raises(ImportError) as err:
BackendValidator(backend_options=options, construct_name='test').verify_backend('backend_3')
assert ('`backend_D` and `backend_E` not installed. Cannot initialize and run test with backend_3 backend. '
'The necessary missing dependencies can be installed using `pip install '
'alibi-detect[backend_D backend_E]`.') == str(err.value)
| 3,234 | 47.283582 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/mocked_opt_dep.py
|
import non_existent_module # noqa: F401
class MockedClassMissingRequiredDeps:
def __init__(self):
self.opt_dep = "opt_dep"
def mocked_function_missing_required_deps():
pass
class MockedClassMissingMultipleRequiredDeps:
def __init__(self):
pass
| 279 | 16.5 | 45 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_import_optional.py
|
import pytest
from alibi_detect.utils.missing_optional_dependency import import_optional, MissingDependency, ERROR_TYPES
class TestImportOptional:
"""Test the import_optional function."""
def setup_method(self):
# mock missing dependency error for non existent module
ERROR_TYPES['non_existent_module'] = 'optional-deps'
def teardown_method(self):
# remove mock missing dependency error for other tests
del ERROR_TYPES['non_existent_module']
def test_import_optional_module(self):
"""Test import_optional correctly imports installed module."""
requests = import_optional('requests')
assert requests.__version__
def test_import_optional_names(self):
"""Test import_optional correctly imports names from installed module."""
from requests import get, post
get2, post2 = import_optional('requests', names=['get', 'post'])
assert get2 == get
assert get2.__name__ == 'get'
assert post2 == post
assert post2.__name__ == 'post'
def test_import_optional_module_missing(self):
"""Test import_optional correctly replaces module that doesn't exist with MissingDependency."""
package = import_optional('alibi_detect.utils.tests.mocked_opt_dep')
assert isinstance(package, MissingDependency)
with pytest.raises(ImportError) as err:
package.__version__ # noqa
assert 'alibi_detect.utils.tests.mocked_opt_dep' in str(err.value)
assert 'pip install alibi-detect[optional-deps]' in str(err.value)
with pytest.raises(ImportError) as err:
package(0, 'test') # noqa
assert 'alibi_detect.utils.tests.mocked_opt_dep' in str(err.value)
assert 'pip install alibi-detect[optional-deps]' in str(err.value)
def test_import_optional_names_missing(self):
"""Test import_optional correctly replaces names from module that doesn't exist with MissingDependencies."""
MockedClassMissingRequiredDeps, mocked_function_missing_required_deps = import_optional(
'alibi_detect.utils.tests.mocked_opt_dep',
names=['MockedClassMissingRequiredDeps', 'mocked_function_missing_required_deps'])
assert isinstance(MockedClassMissingRequiredDeps, MissingDependency)
with pytest.raises(ImportError) as err:
MockedClassMissingRequiredDeps.__version__ # noqa
assert 'MockedClassMissingRequiredDeps' in str(err.value)
assert 'pip install alibi-detect[optional-deps]' in str(err.value)
assert isinstance(mocked_function_missing_required_deps, MissingDependency)
with pytest.raises(ImportError) as err:
mocked_function_missing_required_deps.__version__ # noqa
assert 'mocked_function_missing_required_deps' in str(err.value)
assert 'pip install alibi-detect[optional-deps]' in str(err.value)
| 2,918 | 46.852459 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_perturbation.py
|
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
import pytest
from alibi_detect.utils.data import Bunch
from alibi_detect.utils.perturbation import apply_mask, inject_outlier_ts
from alibi_detect.utils.tensorflow.perturbation import mutate_categorical
x = np.random.rand(20 * 20 * 3).reshape(1, 20, 20, 3)
mask_size = [(2, 2), (8, 8)]
n_masks = [1, 10]
channels = [[0], [0, 1, 2]]
mask_type = ['uniform', 'normal', 'zero']
clip_rng = [(0, 1), (.25, .75)]
tests = list(product(mask_size, n_masks, channels, mask_type, clip_rng))
n_tests = len(tests)
@pytest.fixture
def apply_mask_params(request):
mask_size, n_masks, channels, mask_type, clip_rng = tests[request.param]
return mask_size, n_masks, channels, mask_type, clip_rng
@pytest.mark.parametrize('apply_mask_params', list(range(n_tests)), indirect=True)
def test_apply_mask(apply_mask_params):
mask_size, n_masks, channels, mask_type, clip_rng = apply_mask_params
X_mask, mask = apply_mask(x,
mask_size=mask_size,
n_masks=n_masks,
channels=channels,
mask_type=mask_type,
clip_rng=clip_rng
)
assert X_mask.shape[0] == mask.shape[0] == n_masks
total_masked = n_masks * mask_size[0] * mask_size[1] * len(channels)
if mask_type == 'zero':
assert (mask == X_mask).astype(int).sum() == total_masked
else:
assert clip_rng[0] <= X_mask.min() and clip_rng[1] >= X_mask.max()
assert (X_mask == np.clip(x + mask, clip_rng[0], clip_rng[1])).astype(int).sum() \
== reduce(mul, list(x.shape)) * n_masks
N = 1000
x_ts = [np.random.rand(N).reshape(-1, 1), np.random.rand(3 * N).reshape(-1, 3)]
perc_outlier = [0, 10, 20]
min_std = [0, 1]
tests_ts = list(product(x_ts, perc_outlier, min_std))
n_tests_ts = len(tests_ts)
@pytest.fixture
def inject_outlier_ts_params(request):
return tests_ts[request.param]
@pytest.mark.parametrize('inject_outlier_ts_params', list(range(n_tests_ts)), indirect=True)
def test_inject_outlier_ts(inject_outlier_ts_params):
X, perc_outlier, min_std = inject_outlier_ts_params
data = inject_outlier_ts(X, perc_outlier, perc_window=10, n_std=2., min_std=min_std)
assert isinstance(data, Bunch)
X_outlier, is_outlier = data.data, data.target
assert X_outlier.shape[0] == N == is_outlier.shape[0]
assert perc_outlier - 5 < is_outlier.mean() * 100 < perc_outlier + 5
X_diff = (X_outlier != X).astype(int).sum(axis=1)
idx_diff = np.where(X_diff != 0)[0]
idx_outlier = np.where(is_outlier != 0)[0]
if perc_outlier > 0:
assert (idx_diff == idx_outlier).all()
else:
        assert len(idx_diff) == 0 and len(idx_outlier) == 0
rate = [0., .1, .2]
x_mutate = [np.zeros(10000), np.zeros((10, 10, 10, 1))]
feature_range = [(0, 1), (0, 2)]
tests_mutate = list(product(rate, x_mutate, feature_range))
n_tests_mutate = len(tests_mutate)
@pytest.fixture
def mutate_params(request):
return tests_mutate[request.param]
@pytest.mark.parametrize('mutate_params', list(range(n_tests_mutate)), indirect=True)
def test_mutate(mutate_params):
rate, x_mutate, feature_range = mutate_params
x_pert = mutate_categorical(x_mutate, rate, feature_range=feature_range).numpy()
delta = ((x_mutate - x_pert) != 0).astype(int)
eps = rate * .5
assert rate - eps <= delta.sum() / np.prod(x_mutate.shape) <= rate + eps
if rate > 0.:
assert x_pert.min() == feature_range[0] and x_pert.max() == feature_range[1]
| 3,642 | 35.79798 | 92 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tests/test_statstest.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from alibi_detect.utils.tensorflow import GaussianRBF, mmd2
from alibi_detect.utils.statstest import fdr, permutation_test
q_val = [.05, .1, .25]
n_p = 1000
p_vals = [
{'is_below': True, 'p_val': np.zeros(n_p)},
{'is_below': False, 'p_val': np.zeros(n_p)}
]
tests_fdr = list(product(q_val, p_vals))
n_tests_fdr = len(tests_fdr)
@pytest.fixture
def fdr_params(request):
return tests_fdr[request.param]
@pytest.mark.parametrize('fdr_params', list(range(n_tests_fdr)), indirect=True)
def test_fdr(fdr_params):
q_val, p_vals = fdr_params
if p_vals['is_below'] and p_vals['p_val'].max() == 0:
p_val = p_vals['p_val'] + q_val - 1e-5
elif not p_vals['is_below'] and p_vals['p_val'].max() == 0:
p_val = p_vals['p_val'] + q_val
else:
p_val = p_vals['p_val'].copy()
below_threshold, thresholds = fdr(p_val, q_val)
assert below_threshold == p_vals['is_below']
assert isinstance(thresholds, (np.ndarray, float))
n_features = [2]
n_instances = [(100, 100), (100, 75)]
n_permutations = [10]
mult = [1, 5]
tests_permutation = list(product(n_features, n_instances, n_permutations, mult))
n_tests_permutation = len(tests_permutation)
@pytest.fixture
def permutation_params(request):
return tests_permutation[request.param]
@pytest.mark.parametrize('permutation_params', list(range(n_tests_permutation)), indirect=True)
def test_permutation(permutation_params):
n_features, n_instances, n_permutations, mult = permutation_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = np.random.random(xshape).astype(np.float32)
y = np.random.random(yshape).astype(np.float32) * mult
def metric_fn(x, y):
return mmd2(x, y, kernel=GaussianRBF(sigma=tf.ones(1))).numpy()
p_val, dist, dist_permutations = permutation_test(
x, y, n_permutations=n_permutations, metric=metric_fn
)
if mult == 1:
assert p_val > .2
elif mult > 1:
assert p_val <= .2
assert np.where(dist_permutations >= dist)[0].shape[0] / n_permutations == p_val
| 2,191 | 30.768116 | 95 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/state/state.py
|
import os
from pathlib import Path
import logging
from abc import ABC
from typing import Union, Tuple
import numpy as np
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils.state._pytorch import save_state_dict as _save_state_dict_pt,\
load_state_dict as _load_state_dict_pt
logger = logging.getLogger(__name__)
class StateMixin(ABC):
"""
Utility class that provides methods to save and load stateful attributes to disk.
"""
t: int
online_state_keys: Tuple[str, ...]
def _set_state_dir(self, dirpath: Union[str, os.PathLike]):
"""
Set the directory path to store state in, and create an empty directory if it doesn't already exist.
Parameters
----------
dirpath
The directory to save state file inside.
"""
self.state_dir = Path(dirpath)
self.state_dir.mkdir(parents=True, exist_ok=True)
def save_state(self, filepath: Union[str, os.PathLike]):
"""
Save a detector's state to disk in order to generate a checkpoint.
Parameters
----------
filepath
The directory to save state to.
"""
self._set_state_dir(filepath)
suffix = '.pt' if hasattr(self, 'backend') and self.backend == Framework.PYTORCH else '.npz'
_save_state_dict(self, self.online_state_keys, self.state_dir.joinpath('state' + suffix))
logger.info('Saved state for t={} to {}'.format(self.t, self.state_dir))
def load_state(self, filepath: Union[str, os.PathLike]):
"""
Load the detector's state from disk, in order to restart from a checkpoint previously generated with
`save_state`.
Parameters
----------
filepath
The directory to load state from.
"""
self._set_state_dir(filepath)
suffix = '.pt' if hasattr(self, 'backend') and self.backend == Framework.PYTORCH else '.npz'
_load_state_dict(self, self.state_dir.joinpath('state' + suffix), raise_error=True)
logger.info('State loaded for t={} from {}'.format(self.t, self.state_dir))
def _save_state_dict(detector: StateMixin, keys: tuple, filepath: Path):
"""
Utility function to save a detector's state dictionary to a filepath.
Parameters
----------
detector
The detector to extract state attributes from.
keys
Tuple of state dict keys to populate dictionary with.
filepath
The file to save state dictionary to.
"""
# Construct state dictionary
state_dict = {key: getattr(detector, key, None) for key in keys}
# Save to disk
if filepath.suffix == '.pt':
_save_state_dict_pt(state_dict, filepath)
else:
np.savez(filepath, **state_dict)
def _load_state_dict(detector: StateMixin, filepath: Path, raise_error: bool = True):
"""
Utility function to load a detector's state dictionary from a filepath, and update the detectors attributes with
the values in the state dictionary.
Parameters
----------
detector
The detector to update.
filepath
File to load state dictionary from.
raise_error
Whether to raise an error if a file is not found at `filepath`. Otherwise, raise a warning and skip loading.
Returns
-------
None. The detector is updated inplace.
"""
if filepath.is_file():
if filepath.suffix == '.pt':
state_dict = _load_state_dict_pt(filepath)
else:
state_dict = np.load(str(filepath))
for key, value in state_dict.items():
setattr(detector, key, value)
else:
if raise_error:
raise FileNotFoundError('State file not found at {}.'.format(filepath))
else:
logger.warning('State file not found at {}. Skipping loading of state.'.format(filepath))
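if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of alibi-detect): a toy
    # stateful object built on StateMixin. The `_Toy` class and the
    # './toy_state' directory are assumptions made for this example.
    class _Toy(StateMixin):
        online_state_keys = ('t', 'xs')
        def __init__(self):
            self.t = 3
            self.xs = np.arange(3)
    toy = _Toy()
    toy.save_state('./toy_state')   # no `backend` attribute, so writes state.npz
    toy.t, toy.xs = 0, None
    toy.load_state('./toy_state')   # restores t and xs from the checkpoint
    print(toy.t, toy.xs)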
| 3,881 | 32.756522 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/state/__init__.py
|
from alibi_detect.utils.state.state import StateMixin, _save_state_dict, _load_state_dict
__all__ = [
"StateMixin",
"_save_state_dict",
"_load_state_dict",
]
| 171 | 20.5 | 89 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/state/_pytorch/state.py
|
"""
Submodule to handle saving and loading of detector state dictionaries when the dictionaries contain `torch.Tensor`'s.
"""
from pathlib import Path
import torch
def save_state_dict(state_dict: dict, filepath: Path):
"""
Utility function to save a detector's state dictionary to a filepath using `torch.save`.
Parameters
----------
state_dict
The state dictionary to save.
filepath
Directory to save state dictionary to.
"""
# Save to disk
torch.save(state_dict, filepath)
def load_state_dict(filepath: Path) -> dict:
"""
Utility function to load a detector's state dictionary from a filepath with `torch.load`.
Parameters
----------
filepath
Directory to load state dictionary from.
Returns
-------
The loaded state dictionary.
"""
return torch.load(filepath)
| 870 | 22.540541 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/state/_pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
save_state_dict, load_state_dict = import_optional(
'alibi_detect.utils.state._pytorch.state',
names=['save_state_dict', 'load_state_dict']
)
__all__ = [
"save_state_dict",
"load_state_dict",
]
| 288 | 21.230769 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/keops/kernels.py
|
from pykeops.torch import LazyTensor
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import Literal
from copy import deepcopy
def sigma_mean(x: LazyTensor, y: LazyTensor, dist: LazyTensor, n_min: int = 100) -> torch.Tensor:
"""
Set bandwidth to the mean distance between instances x and y.
Parameters
----------
x
LazyTensor of instances with dimension [Nx, 1, features] or [batch_size, Nx, 1, features].
The singleton dimension is necessary for broadcasting.
y
LazyTensor of instances with dimension [1, Ny, features] or [batch_size, 1, Ny, features].
The singleton dimension is necessary for broadcasting.
dist
LazyTensor with dimensions [Nx, Ny] or [batch_size, Nx, Ny] containing the
pairwise distances between `x` and `y`.
n_min
In order to check whether x equals y after squeezing the singleton dimensions, we check if the
diagonal of the distance matrix (which is a lazy tensor from which the diagonal cannot be directly extracted)
consists of all zeros. We do this by computing the k-min distances and k-argmin indices over the
columns of the distance matrix. We then check if the distances on the diagonal of the distance matrix
are all zero or not. If they are all zero, then we do not use these distances (zeros) when computing
the mean pairwise distance as bandwidth. If Nx becomes very large, it is advised to set `n_min`
to a low enough value to avoid OOM issues. By default we set it to 100 instances.
Returns
-------
The computed bandwidth, `sigma`.
"""
batched = len(dist.shape) == 3
if not batched:
nx, ny = dist.shape
axis = 1
else:
batch_size, nx, ny = dist.shape
axis = 2
n_mean = nx * ny
if nx == ny:
n_min = min(n_min, nx) if isinstance(n_min, int) else nx
d_min, id_min = dist.Kmin_argKmin(n_min, axis=axis)
if batched:
d_min, id_min = d_min[0], id_min[0] # first instance in permutation test contains the original data
rows, cols = torch.where(id_min.cpu() == torch.arange(nx)[:, None])
if (d_min[rows, cols] == 0.).all():
n_mean = nx * (nx - 1)
dist_sum = dist.sum(1).sum(1)[0] if batched else dist.sum(1).sum().unsqueeze(-1)
sigma = (.5 * dist_sum / n_mean) ** .5
return sigma
class GaussianRBF(nn.Module):
def __init__(
self,
sigma: Optional[torch.Tensor] = None,
init_sigma_fn: Optional[Callable] = None,
trainable: bool = False
) -> None:
"""
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)) * ||x-y||^2). A forward pass takes
a batch of instances x and y and returns the kernel matrix.
x can be of shape [Nx, 1, features] or [batch_size, Nx, 1, features].
y can be of shape [1, Ny, features] or [batch_size, 1, Ny, features].
The returned kernel matrix can be of shape [Nx, Ny] or [batch_size, Nx, Ny].
x, y and the returned kernel matrix are all lazy tensors.
Parameters
----------
sigma
Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Multiple values can be passed; the kernel is then evaluated with each and the results averaged.
init_sigma_fn
Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
The function's signature should match :py:func:`~alibi_detect.utils.keops.kernels.sigma_mean`,
meaning that it should take in the lazy tensors `x`, `y` and `dist` and return a tensor `sigma`.
trainable
Whether or not to track gradients w.r.t. `sigma` to allow it to be trained.
"""
super().__init__()
init_sigma_fn = sigma_mean if init_sigma_fn is None else init_sigma_fn
self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
if sigma is None:
self.log_sigma = nn.Parameter(torch.empty(1), requires_grad=trainable)
self.init_required = True
else:
sigma = sigma.reshape(-1) # [Ns,]
self.log_sigma = nn.Parameter(sigma.log(), requires_grad=trainable)
self.init_required = False
self.init_sigma_fn = init_sigma_fn
self.trainable = trainable
@property
def sigma(self) -> torch.Tensor:
return self.log_sigma.exp()
def forward(self, x: LazyTensor, y: LazyTensor, infer_sigma: bool = False) -> LazyTensor:
dist = ((x - y) ** 2).sum(-1)
if infer_sigma or self.init_required:
if self.trainable and infer_sigma:
raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
sigma = self.init_sigma_fn(x, y, dist)
with torch.no_grad():
self.log_sigma.copy_(sigma.log().clone())
self.init_required = False
gamma = 1. / (2. * self.sigma ** 2)
gamma = LazyTensor(gamma[None, None, :]) if len(dist.shape) == 2 else LazyTensor(gamma[None, None, None, :])
kernel_mat = (- gamma * dist).exp()
if len(dist.shape) < len(gamma.shape):
kernel_mat = kernel_mat.sum(-1) / len(self.sigma)
return kernel_mat
def get_config(self) -> dict:
"""
        Returns a serializable config dict (excluding the init_sigma_fn, which is serialized in alibi_detect.saving).
"""
cfg = deepcopy(self.config)
if isinstance(cfg['sigma'], torch.Tensor):
cfg['sigma'] = cfg['sigma'].detach().cpu().numpy().tolist()
cfg.update({'flavour': Framework.KEOPS.value})
return cfg
@classmethod
def from_config(cls, config):
"""
Instantiates a kernel from a config dictionary.
Parameters
----------
config
A kernel config dictionary.
"""
config.pop('flavour')
return cls(**config)
class DeepKernel(nn.Module):
def __init__(
self,
proj: nn.Module,
kernel_a: Union[nn.Module, Literal['rbf']] = 'rbf',
kernel_b: Optional[Union[nn.Module, Literal['rbf']]] = 'rbf',
eps: Union[float, Literal['trainable']] = 'trainable'
) -> None:
"""
Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).
A forward pass takes an already projected batch of instances x_proj and y_proj and optionally
(if k_b is present) a batch of instances x and y and returns the kernel matrix.
x_proj can be of shape [Nx, 1, features_proj] or [batch_size, Nx, 1, features_proj].
y_proj can be of shape [1, Ny, features_proj] or [batch_size, 1, Ny, features_proj].
x can be of shape [Nx, 1, features] or [batch_size, Nx, 1, features].
y can be of shape [1, Ny, features] or [batch_size, 1, Ny, features].
The returned kernel matrix can be of shape [Nx, Ny] or [batch_size, Nx, Ny].
x, y and the returned kernel matrix are all lazy tensors.
Parameters
----------
proj
The projection to be applied to the inputs before applying kernel_a
kernel_a
The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
kernel_b
The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
Set to None in order to use only the deep component (i.e. eps=0).
eps
The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
            either specified or set to 'trainable'. Only relevant if kernel_b is not None.
"""
super().__init__()
self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
if kernel_a == 'rbf':
kernel_a = GaussianRBF(trainable=True)
if kernel_b == 'rbf':
kernel_b = GaussianRBF(trainable=True)
self.kernel_a: Callable = kernel_a
self.kernel_b: Callable = kernel_b
self.proj = proj
if kernel_b is not None:
self._init_eps(eps)
def _init_eps(self, eps: Union[float, Literal['trainable']]) -> None:
if isinstance(eps, float):
if not 0 < eps < 1:
raise ValueError("eps should be in (0,1)")
self.logit_eps = nn.Parameter(torch.tensor(eps).logit(), requires_grad=False)
elif eps == 'trainable':
self.logit_eps = nn.Parameter(torch.tensor(0.))
else:
raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")
@property
def eps(self) -> torch.Tensor:
return self.logit_eps.sigmoid() if self.kernel_b is not None else torch.tensor(0.)
def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None,
y: Optional[LazyTensor] = None) -> LazyTensor:
similarity = self.kernel_a(x_proj, y_proj)
if self.kernel_b is not None:
similarity = (1-self.eps)*similarity + self.eps*self.kernel_b(x, y)
return similarity
def get_config(self) -> dict:
return deepcopy(self.config)
@classmethod
def from_config(cls, config):
return cls(**config)
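if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, requires a working pykeops
    # install): evaluate the Gaussian RBF kernel lazily on two toy samples.
    # The shapes and data below are arbitrary assumptions.
    x, y = torch.randn(100, 5), torch.randn(80, 5)
    x_lazy, y_lazy = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])
    kernel = GaussianRBF()
    k_xy = kernel(x_lazy, y_lazy, infer_sigma=True)  # [100, 80] kernel matrix
    print(k_xy.shape)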
| 9,448 | 42.543779 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/keops/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
GaussianRBF, DeepKernel = import_optional(
'alibi_detect.utils.keops.kernels',
names=['GaussianRBF', 'DeepKernel']
)
__all__ = [
"GaussianRBF",
"DeepKernel"
]
| 253 | 18.538462 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/keops/tests/test_kernels_keops.py
|
from itertools import product
import numpy as np
from alibi_detect.utils.frameworks import has_keops
import pytest
import torch
import torch.nn as nn
if has_keops:
from pykeops.torch import LazyTensor
from alibi_detect.utils.keops import DeepKernel, GaussianRBF
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
batch_size = [None, 5]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, batch_size, trainable))
n_tests_gk = len(tests_gk)
@pytest.fixture
def gaussian_kernel_params(request):
return tests_gk[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
sigma, n_features, n_instances, batch_size, trainable = gaussian_kernel_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
if batch_size:
xshape = (batch_size, ) + xshape
yshape = (batch_size, ) + yshape
sigma = sigma if sigma is None else torch.from_numpy(sigma).float()
x = torch.from_numpy(np.random.random(xshape)).float()
y = torch.from_numpy(np.random.random(yshape)).float()
if batch_size:
x_lazy, y_lazy = LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :])
x_lazy2 = LazyTensor(x[:, None, :, :])
else:
x_lazy, y_lazy = LazyTensor(x[:, None, :]), LazyTensor(y[None, :, :])
x_lazy2 = LazyTensor(x[None, :, :])
kernel = GaussianRBF(sigma=sigma, trainable=trainable)
    infer_sigma = sigma is None
if trainable and infer_sigma:
with pytest.raises(ValueError):
kernel(x_lazy, y_lazy, infer_sigma=infer_sigma)
else:
k_xy = kernel(x_lazy, y_lazy, infer_sigma=infer_sigma)
k_xx = kernel(x_lazy, x_lazy2, infer_sigma=infer_sigma)
k_xy_shape = n_instances
k_xx_shape = (n_instances[0], n_instances[0])
axis = 1
if batch_size:
k_xy_shape = (batch_size, ) + k_xy_shape
k_xx_shape = (batch_size, ) + k_xx_shape
axis = 2
assert k_xy.shape == k_xy_shape and k_xx.shape == k_xx_shape
k_xx_argmax = k_xx.argmax(axis=axis)
k_xx_min, k_xy_min = k_xx.min(axis=axis), k_xy.min(axis=axis)
if batch_size:
k_xx_argmax, k_xx_min, k_xy_min = k_xx_argmax[0], k_xx_min[0], k_xy_min[0]
assert (torch.arange(n_instances[0]) == k_xx_argmax.cpu().view(-1)).all()
assert (k_xx_min >= 0.).all() and (k_xy_min >= 0.).all()
if has_keops:
class MyKernel(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor:
return (- ((x - y) ** 2).sum(-1)).exp()
n_features = [5]
n_instances = [(100, 100), (100, 75)]
kernel_a = ['GaussianRBF', 'MyKernel']
kernel_b = ['GaussianRBF', 'MyKernel', None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)
@pytest.fixture
def deep_kernel_params(request):
return tests_dk[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
proj = nn.Linear(n_features, n_features)
kernel_a = MyKernel() if kernel_a == 'MyKernel' else GaussianRBF(trainable=True)
if kernel_b == 'MyKernel':
kernel_b = MyKernel()
elif kernel_b == 'GaussianRBF':
kernel_b = GaussianRBF(trainable=True)
kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
x = torch.as_tensor(np.random.random(xshape).astype('float32'))
y = torch.as_tensor(np.random.random(yshape).astype('float32'))
x_proj, y_proj = kernel.proj(x), kernel.proj(y)
x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :])
y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :])
if kernel_b:
x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :])
y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :])
else:
x, x2, y, y2 = None, None, None, None
k_xy = kernel(x_proj, y2_proj, x, y2)
k_yx = kernel(y_proj, x2_proj, y, x2)
k_xx = kernel(x_proj, x2_proj, x, x2)
assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
assert (k_xx.Kmin_argKmin(1, axis=1)[0] > 0.).all()
assert (torch.abs(k_xy.sum(1).sum(1) - k_yx.t().sum(1).sum(1)) < 1e-5).all()
| 4,889 | 39.081967 | 90 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/fetching/fetching.py
|
import logging
import os
from io import BytesIO
from pathlib import Path
from typing import Union, TYPE_CHECKING, Tuple
import dill
import requests
from requests import RequestException
import tensorflow as tf
from tensorflow.python.keras import backend
from alibi_detect.models.tensorflow import PixelCNN
from alibi_detect.saving import load_detector
if TYPE_CHECKING:
# Import the true objects directly for typechecking. (See note in CONTRIBUTING.md in Optional Dependencies section)
from alibi_detect.ad.adversarialae import AdversarialAE # noqa
from alibi_detect.ad.model_distillation import ModelDistillation # noqa
from alibi_detect.base import BaseDetector # noqa
from alibi_detect.od.llr import LLR # noqa
from alibi_detect.od.isolationforest import IForest # noqa
from alibi_detect.od.mahalanobis import Mahalanobis # noqa
from alibi_detect.od.aegmm import OutlierAEGMM # noqa
from alibi_detect.od.ae import OutlierAE # noqa
from alibi_detect.od.prophet import OutlierProphet # noqa
from alibi_detect.od.seq2seq import OutlierSeq2Seq # noqa
from alibi_detect.od.vae import OutlierVAE # noqa
from alibi_detect.od.vaegmm import OutlierVAEGMM # noqa
from alibi_detect.od.sr import SpectralResidual # noqa
from alibi_detect.utils.url import _join_url
# do not extend pickle dispatch table so as not to change pickle behaviour
dill.extend(use_dill=False)
logger = logging.getLogger(__name__)
Data = Union[
'BaseDetector',
'AdversarialAE',
'ModelDistillation',
'IForest',
'LLR',
'Mahalanobis',
'OutlierAEGMM',
'OutlierAE',
'OutlierProphet',
'OutlierSeq2Seq',
'OutlierVAE',
'OutlierVAEGMM',
'SpectralResidual'
]
"""Number of seconds to wait for URL requests before raising an error."""
TIMEOUT = 10
def get_pixelcnn_default_kwargs():
dist = PixelCNN(
image_shape=(28, 28, 1),
num_resnet=5,
num_hierarchies=2,
num_filters=32,
num_logistic_mix=1,
receptive_field_dims=(3, 3),
dropout_p=.3,
l2_weight=0.
)
KWARGS_PIXELCNN = {
'dist_s': dist,
'dist_b': dist.copy(),
'input_shape': (28, 28, 1)
}
return KWARGS_PIXELCNN
def fetch_tf_model(dataset: str, model: str) -> tf.keras.Model:
"""
Fetch pretrained tensorflow models from the google cloud bucket.
Parameters
----------
dataset
Dataset trained on.
model
Model name.
Returns
-------
Pretrained tensorflow model.
"""
url = 'https://storage.googleapis.com/seldon-models/alibi-detect/classifier/'
path_model = _join_url(url, [dataset, model, 'model.h5'])
save_path = tf.keras.utils.get_file(Path(model + '.h5').resolve(), path_model)
if dataset == 'cifar10' and model == 'resnet56':
custom_objects = {'backend': backend}
else:
custom_objects = None
clf = tf.keras.models.load_model(save_path, custom_objects=custom_objects)
return clf
def fetch_enc_dec(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download encoder and decoder networks.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
if not model_path.is_dir():
model_path.mkdir(parents=True, exist_ok=True)
# encoder and decoder
tf.keras.utils.get_file(
model_path.joinpath('encoder_net.h5'),
_join_url(url_models, 'encoder_net.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('decoder_net.h5'),
_join_url(url_models, 'decoder_net.h5')
)
def fetch_ae(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download AE outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
fetch_enc_dec(url, filepath)
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
# encoder and decoder
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('ae.ckpt.index'),
_join_url(url_models, 'ae.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('ae.ckpt.data-00000-of-00001'),
_join_url(url_models, 'ae.ckpt.data-00000-of-00001')
)
def fetch_ad_ae(url: str, filepath: Union[str, os.PathLike], state_dict: dict) -> None:
"""
Download AE adversarial detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
state_dict
Dictionary containing the detector's parameters.
"""
fetch_enc_dec(url, filepath)
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
tf.keras.utils.get_file(
model_path.joinpath('model.h5'),
_join_url(url_models, 'model.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('ae.ckpt.index'),
_join_url(url_models, 'ae.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('ae.ckpt.data-00000-of-00002'),
_join_url(url_models, 'ae.ckpt.data-00000-of-00002')
)
tf.keras.utils.get_file(
model_path.joinpath('ae.ckpt.data-00001-of-00002'),
_join_url(url_models, 'ae.ckpt.data-00001-of-00002')
)
hidden_layer_kld = state_dict['hidden_layer_kld']
if hidden_layer_kld:
        for i in range(len(hidden_layer_kld)):
hl = 'model_hl_' + str(i)
tf.keras.utils.get_file(
model_path.joinpath(hl + '.ckpt.index'),
_join_url(url_models, hl + '.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath(hl + '.ckpt.data-00000-of-00002'),
_join_url(url_models, hl + '.ckpt.data-00000-of-00002')
)
tf.keras.utils.get_file(
model_path.joinpath(hl + '.ckpt.data-00001-of-00002'),
_join_url(url_models, hl + '.ckpt.data-00001-of-00002')
)
def fetch_ad_md(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download model and distilled model.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
if not model_path.is_dir():
model_path.mkdir(parents=True, exist_ok=True)
# encoder and decoder
tf.keras.utils.get_file(
model_path.joinpath('model.h5'),
_join_url(url_models, 'model.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('distilled_model.h5'),
_join_url(url_models, 'distilled_model.h5')
)
def fetch_aegmm(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download AEGMM outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
# save encoder and decoder
fetch_enc_dec(url, filepath)
# save GMM network
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
tf.keras.utils.get_file(
model_path.joinpath('gmm_density_net.h5'),
_join_url(url_models, 'gmm_density_net.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('aegmm.ckpt.index'),
_join_url(url_models, 'aegmm.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('aegmm.ckpt.data-00000-of-00001'),
_join_url(url_models, 'aegmm.ckpt.data-00000-of-00001')
)
def fetch_vae(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download VAE outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
fetch_enc_dec(url, filepath)
# save VAE weights
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('vae.ckpt.index'),
_join_url(url_models, 'vae.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('vae.ckpt.data-00000-of-00001'),
_join_url(url_models, 'vae.ckpt.data-00000-of-00001')
)
def fetch_vaegmm(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download VAEGMM outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
# save encoder and decoder
fetch_enc_dec(url, filepath)
# save GMM network
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
tf.keras.utils.get_file(
model_path.joinpath('gmm_density_net.h5'),
_join_url(url_models, 'gmm_density_net.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('vaegmm.ckpt.index'),
_join_url(url_models, 'vaegmm.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('vaegmm.ckpt.data-00000-of-00001'),
_join_url(url_models, 'vaegmm.ckpt.data-00000-of-00001')
)
def fetch_seq2seq(url: str, filepath: Union[str, os.PathLike]) -> None:
"""
Download sequence-to-sequence outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
if not model_path.is_dir():
model_path.mkdir(parents=True, exist_ok=True)
# save seq2seq
tf.keras.utils.get_file(
model_path.joinpath('checkpoint'),
_join_url(url_models, 'checkpoint')
)
tf.keras.utils.get_file(
model_path.joinpath('seq2seq.ckpt.index'),
_join_url(url_models, 'seq2seq.ckpt.index')
)
tf.keras.utils.get_file(
model_path.joinpath('seq2seq.ckpt.data-00000-of-00001'),
_join_url(url_models, 'seq2seq.ckpt.data-00000-of-00001')
)
# save threshold network
tf.keras.utils.get_file(
model_path.joinpath('threshold_net.h5'),
_join_url(url_models, 'threshold_net.h5')
)
def fetch_llr(url: str, filepath: Union[str, os.PathLike]) -> str:
"""
Download Likelihood Ratio outlier detector.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
"""
url_models = _join_url(url, 'model')
model_path = Path(filepath).joinpath('model').resolve()
if not model_path.is_dir():
model_path.mkdir(parents=True, exist_ok=True)
try:
tf.keras.utils.get_file(
model_path.joinpath('model_s.h5'),
_join_url(url_models, 'model_s.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('model_b.h5'),
_join_url(url_models, 'model_b.h5')
)
model_type = 'weights'
return model_type
except Exception:
tf.keras.utils.get_file(
model_path.joinpath('model.h5'),
_join_url(url_models, 'model.h5')
)
tf.keras.utils.get_file(
model_path.joinpath('model_background.h5'),
_join_url(url_models, 'model_background.h5')
)
return 'model'
def fetch_state_dict(url: str, filepath: Union[str, os.PathLike],
save_state_dict: bool = True) -> Tuple[dict, dict]:
"""
Fetch the metadata and state/hyperparameter values of pre-trained detectors.
Parameters
----------
url
URL to fetch detector from.
filepath
Local directory to save detector to.
save_state_dict
Whether to save the state dict locally.
Returns
-------
Detector metadata and state.
"""
# Check if metadata stored as dill or pickle
try:
url_meta = _join_url(url, 'meta.dill')
resp = requests.get(url_meta, timeout=TIMEOUT)
resp.raise_for_status()
suffix = '.dill'
except RequestException:
try:
url_meta = _join_url(url, 'meta.pickle')
resp = requests.get(url_meta, timeout=TIMEOUT)
resp.raise_for_status()
suffix = '.pickle'
except RequestException:
logger.exception('Timed out while searching for meta.dill or meta.pickle files at %s.', url)
raise
# Load metadata and state_dict
meta = dill.load(BytesIO(resp.content))
try:
url_state = _join_url(url, meta['name'] + suffix)
        resp = requests.get(url_state, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception('Timed out while searching for corresponding state file at %s.', url)
raise
state_dict = dill.load(BytesIO(resp.content))
# Save state
if save_state_dict:
filepath = Path(filepath)
with open(filepath.joinpath('meta.dill'), 'wb') as f:
dill.dump(meta, f)
with open(filepath.joinpath(meta['name'] + '.dill'), 'wb') as f:
dill.dump(state_dict, f)
return meta, state_dict
def fetch_detector(filepath: Union[str, os.PathLike],
detector_type: str,
dataset: str,
detector_name: str,
model: str = None) -> Data:
"""
Fetch an outlier or adversarial detector from a google bucket, save it locally and return
the initialised detector.
Parameters
----------
filepath
Local directory to save detector to.
detector_type
`outlier` or `adversarial`.
dataset
Dataset of pre-trained detector. E.g. `kddcup`, `cifar10` or `ecg`.
detector_name
Name of the detector in the bucket.
model
Classification model used for adversarial detection.
Returns
-------
Initialised pre-trained detector.
"""
# create path (if needed)
filepath = Path(filepath)
if not filepath.is_dir():
filepath.mkdir(parents=True, exist_ok=True)
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
# create url of detector
url = 'https://storage.googleapis.com/seldon-models/alibi-detect/'
if detector_type == 'adversarial':
url = _join_url(url, ['ad', dataset, model, detector_name])
elif detector_type == 'outlier':
url = _join_url(url, ['od', detector_name, dataset])
# fetch the metadata and state dict
meta, state_dict = fetch_state_dict(url, filepath, save_state_dict=True)
# load detector
name = meta['name']
kwargs: dict = {}
if name == 'OutlierAE':
fetch_ae(url, filepath)
elif name == 'OutlierAEGMM':
fetch_aegmm(url, filepath)
elif name == 'OutlierVAE':
fetch_vae(url, filepath)
elif name == 'OutlierVAEGMM':
fetch_vaegmm(url, filepath)
elif name == 'OutlierSeq2Seq':
fetch_seq2seq(url, filepath)
elif name == 'AdversarialAE':
fetch_ad_ae(url, filepath, state_dict)
if model == 'resnet56':
kwargs = {'custom_objects': {'backend': backend}}
elif name == 'ModelDistillation':
fetch_ad_md(url, filepath)
if model == 'resnet56':
kwargs = {'custom_objects': {'backend': backend}}
elif name == 'LLR':
model_type = fetch_llr(url, filepath)
if model_type == 'weights':
kwargs = get_pixelcnn_default_kwargs()
detector = load_detector(filepath, **kwargs)
return detector # type: ignore[return-value] # load_detector returns drift detectors but `Data` doesn't inc. them
# TODO - above type ignore can be removed once all detectors use the config based approach.
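# Example (illustrative sketch): fetching a hosted detector. This downloads
# weights, so it needs network access; the cifar10/OutlierVAE combination is
# assumed to be available in the bucket.
#
#   od = fetch_detector(filepath='./detector', detector_type='outlier',
#                       dataset='cifar10', detector_name='OutlierVAE')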
| 16,646 | 30.115888 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/fetching/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
fetch_detector, fetch_tf_model = import_optional('alibi_detect.utils.fetching.fetching',
names=['fetch_detector', 'fetch_tf_model'])
__all__ = ['fetch_tf_model', 'fetch_detector']
| 306 | 42.857143 | 92 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/losses.py
|
import torch
def hinge_loss(preds: torch.Tensor) -> torch.Tensor:
"L(pred) = max(0, 1-pred) averaged over multiple preds"
linear_inds = preds < 1
return (((1 - preds)*linear_inds).sum(0))/len(preds)
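if __name__ == '__main__':
    # Quick sketch (illustrative only): hinge loss on a few toy predictions.
    preds = torch.tensor([2.0, 0.5, -1.0])
    print(hinge_loss(preds))  # (0 + 0.5 + 2) / 3 = tensor(0.8333)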
| 213 | 25.75 | 59 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/kernels.py
|
import numpy as np
import torch
from torch import nn
from . import distance
from typing import Optional, Union, Callable
from alibi_detect.utils.frameworks import Framework
def sigma_median(x: torch.Tensor, y: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
"""
Bandwidth estimation using the median heuristic :cite:t:`Gretton2012`.
Parameters
----------
x
Tensor of instances with dimension [Nx, features].
y
Tensor of instances with dimension [Ny, features].
dist
Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.
Returns
-------
The computed bandwidth, `sigma`.
"""
n = min(x.shape[0], y.shape[0])
n = n if (x[:n] == y[:n]).all() and x.shape == y.shape else 0
n_median = n + (np.prod(dist.shape) - n) // 2 - 1
sigma = (.5 * dist.flatten().sort().values[int(n_median)].unsqueeze(dim=-1)) ** .5
return sigma
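# Example (illustrative sketch): the median heuristic on two toy samples.
# The shapes below are arbitrary assumptions.
#
#   x, y = torch.randn(10, 3), torch.randn(8, 3)
#   dist = distance.squared_pairwise_distance(x, y)
#   sigma = sigma_median(x, y, dist)  # tensor of shape [1]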
class GaussianRBF(nn.Module):
def __init__(
self,
sigma: Optional[torch.Tensor] = None,
init_sigma_fn: Optional[Callable] = None,
trainable: bool = False
) -> None:
"""
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2)) * ||x-y||^2). A forward pass takes
a batch of instances x [Nx, features] and y [Ny, features] and returns the kernel
matrix [Nx, Ny].
Parameters
----------
sigma
Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Multiple values can be passed; the kernel is then evaluated with each and the results averaged.
init_sigma_fn
Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
The function's signature should match :py:func:`~alibi_detect.utils.pytorch.kernels.sigma_median`,
meaning that it should take in the tensors `x`, `y` and `dist` and return `sigma`. If `None`, it is set to
:func:`~alibi_detect.utils.pytorch.kernels.sigma_median`.
trainable
Whether or not to track gradients w.r.t. `sigma` to allow it to be trained.
"""
super().__init__()
init_sigma_fn = sigma_median if init_sigma_fn is None else init_sigma_fn
self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
if sigma is None:
self.log_sigma = nn.Parameter(torch.empty(1), requires_grad=trainable)
self.init_required = True
else:
sigma = sigma.reshape(-1) # [Ns,]
self.log_sigma = nn.Parameter(sigma.log(), requires_grad=trainable)
self.init_required = False
self.init_sigma_fn = init_sigma_fn
self.trainable = trainable
@property
def sigma(self) -> torch.Tensor:
return self.log_sigma.exp()
def forward(self, x: Union[np.ndarray, torch.Tensor], y: Union[np.ndarray, torch.Tensor],
infer_sigma: bool = False) -> torch.Tensor:
x, y = torch.as_tensor(x), torch.as_tensor(y)
dist = distance.squared_pairwise_distance(x.flatten(1), y.flatten(1)) # [Nx, Ny]
if infer_sigma or self.init_required:
if self.trainable and infer_sigma:
raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
sigma = self.init_sigma_fn(x, y, dist)
with torch.no_grad():
self.log_sigma.copy_(sigma.log().clone())
self.init_required = False
gamma = 1. / (2. * self.sigma ** 2) # [Ns,]
# TODO: do matrix multiplication after all?
kernel_mat = torch.exp(- torch.cat([(g * dist)[None, :, :] for g in gamma], dim=0)) # [Ns, Nx, Ny]
return kernel_mat.mean(dim=0) # [Nx, Ny]
def get_config(self) -> dict:
"""
        Returns a serializable config dict (excluding the init_sigma_fn, which is serialized in alibi_detect.saving).
"""
cfg = self.config.copy()
if isinstance(cfg['sigma'], torch.Tensor):
cfg['sigma'] = cfg['sigma'].detach().cpu().numpy().tolist()
cfg.update({'flavour': Framework.PYTORCH.value})
return cfg
@classmethod
def from_config(cls, config):
"""
Instantiates a kernel from a config dictionary.
Parameters
----------
config
A kernel config dictionary.
"""
config.pop('flavour')
return cls(**config)
class DeepKernel(nn.Module):
"""
Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).
A forward pass takes a batch of instances x [Nx, features] and y [Ny, features] and returns
the kernel matrix [Nx, Ny].
Parameters
----------
proj
The projection to be applied to the inputs before applying kernel_a
kernel_a
The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
kernel_b
The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
Set to None in order to use only the deep component (i.e. eps=0).
eps
The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
        either specified or set to 'trainable'. Only relevant if kernel_b is not None.
"""
def __init__(
self,
proj: nn.Module,
kernel_a: Union[nn.Module, str] = 'rbf',
kernel_b: Optional[Union[nn.Module, str]] = 'rbf',
eps: Union[float, str] = 'trainable'
) -> None:
super().__init__()
self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
if kernel_a == 'rbf':
kernel_a = GaussianRBF(trainable=True)
if kernel_b == 'rbf':
kernel_b = GaussianRBF(trainable=True)
self.kernel_a = kernel_a
self.kernel_b = kernel_b
self.proj = proj
if kernel_b is not None:
self._init_eps(eps)
def _init_eps(self, eps: Union[float, str]) -> None:
if isinstance(eps, float):
if not 0 < eps < 1:
raise ValueError("eps should be in (0,1)")
self.logit_eps = nn.Parameter(torch.tensor(eps).logit(), requires_grad=False)
elif eps == 'trainable':
self.logit_eps = nn.Parameter(torch.tensor(0.))
else:
raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")
@property
def eps(self) -> torch.Tensor:
return self.logit_eps.sigmoid() if self.kernel_b is not None else torch.tensor(0.)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
similarity = self.kernel_a(self.proj(x), self.proj(y)) # type: ignore[operator]
if self.kernel_b is not None:
similarity = (1-self.eps)*similarity + self.eps*self.kernel_b(x, y) # type: ignore[operator]
return similarity
def get_config(self) -> dict:
return self.config.copy()
@classmethod
def from_config(cls, config):
return cls(**config)
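if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): a Gaussian RBF kernel with an
    # inferred bandwidth, and a DeepKernel wrapping a random linear projection.
    # The shapes and data below are arbitrary assumptions.
    x, y = torch.randn(100, 5), torch.randn(80, 5)
    kernel = GaussianRBF()
    k_xy = kernel(x, y, infer_sigma=True)        # [100, 80]
    deep_kernel = DeepKernel(proj=nn.Linear(5, 5))
    k_deep = deep_kernel(x, y)                   # [100, 80]
    print(k_xy.shape, k_deep.shape, float(deep_kernel.eps))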
| 7,088 | 37.737705 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/prediction.py
|
from functools import partial
from typing import Callable, Optional, Type, Union
import numpy as np
import torch
import torch.nn as nn
from alibi_detect.utils.pytorch.misc import get_device
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, torch.Tensor], model: Union[Callable, nn.Module, nn.Sequential],
device: Optional[torch.device] = None, batch_size: int = int(1e10), preprocess_fn: Callable = None,
dtype: Union[Type[np.generic], torch.dtype] = np.float32) -> Union[np.ndarray, torch.Tensor, tuple]:
"""
Make batch predictions on a model.
Parameters
----------
x
Batch of instances.
model
PyTorch model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
batch_size
Batch size used during prediction.
preprocess_fn
Optional preprocessing function for each batch.
dtype
Model output type, e.g. np.float32 or torch.float32.
Returns
-------
Numpy array, torch tensor or tuples of those with model outputs.
"""
device = get_device(device)
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
n = len(x)
n_minibatch = int(np.ceil(n / batch_size))
return_np = not isinstance(dtype, torch.dtype)
return_list = False
preds: Union[list, tuple] = []
with torch.no_grad():
for i in range(n_minibatch):
istart, istop = i * batch_size, min((i + 1) * batch_size, n)
x_batch = x[istart:istop]
if isinstance(preprocess_fn, Callable): # type: ignore
x_batch = preprocess_fn(x_batch)
preds_tmp = model(x_batch.to(device)) # type: ignore
if isinstance(preds_tmp, (list, tuple)):
if len(preds) == 0: # init tuple with lists to store predictions
preds = tuple([] for _ in range(len(preds_tmp)))
return_list = isinstance(preds_tmp, list)
for j, p in enumerate(preds_tmp):
if device.type == 'cuda' and isinstance(p, torch.Tensor):
p = p.cpu()
preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
elif isinstance(preds_tmp, (np.ndarray, torch.Tensor)):
if device.type == 'cuda' and isinstance(preds_tmp, torch.Tensor):
preds_tmp = preds_tmp.cpu()
preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray) # type: ignore
else preds_tmp.numpy())
else:
raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
f'type needs to be one of list, tuple, np.ndarray or torch.Tensor.')
concat = partial(np.concatenate, axis=0) if return_np else partial(torch.cat, dim=0) # type: ignore[arg-type]
out: Union[tuple, np.ndarray, torch.Tensor] = tuple(concat(p) for p in preds) if isinstance(preds, tuple) \
else concat(preds)
if return_list:
out = list(out) # type: ignore[assignment]
return out # TODO: update return type with list
def predict_batch_transformer(x: Union[list, np.ndarray], model: Union[nn.Module, nn.Sequential],
tokenizer: Callable, max_len: int, device: Optional[torch.device] = None,
batch_size: int = int(1e10), dtype: Union[Type[np.generic], torch.dtype] = np.float32) \
-> Union[np.ndarray, torch.Tensor, tuple]:
"""
Make batch predictions using a transformers tokenizer and model.
Parameters
----------
x
Batch of instances.
model
PyTorch model.
tokenizer
Tokenizer for model.
max_len
Max sequence length for tokens.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
batch_size
Batch size used during prediction.
dtype
Model output type, e.g. np.float32 or torch.float32.
Returns
-------
Numpy array or torch tensor with model outputs.
"""
preprocess_fn = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='pt')
return predict_batch(x, model, device=device, preprocess_fn=preprocess_fn, batch_size=batch_size, dtype=dtype)
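if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): batched predictions with a toy
    # linear model on random data. Model, shapes and batch size are assumptions.
    model = nn.Linear(5, 2)
    x = np.random.randn(32, 5).astype(np.float32)
    preds = predict_batch(x, model, batch_size=8)  # np.ndarray of shape (32, 2)
    print(preds.shape)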
| 4,639 | 42.364486 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/misc.py
|
import logging
from typing import Optional, Union, Type
import torch
logger = logging.getLogger(__name__)
def zero_diag(mat: torch.Tensor) -> torch.Tensor:
"""
Set the diagonal of a matrix to 0
Parameters
----------
mat
A 2D square matrix
Returns
-------
A 2D square matrix with zeros along the diagonal
"""
return mat - torch.diag(mat.diag())
def quantile(sample: torch.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
"""
Estimate a desired quantile of a univariate distribution from a vector of samples
Parameters
----------
sample
A 1D vector of values
p
The desired quantile in (0,1)
type
The method for computing the quantile.
See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
sorted
Whether or not the vector is already sorted into ascending order
Returns
-------
An estimate of the quantile
"""
N = len(sample)
if len(sample.shape) != 1:
raise ValueError("Quantile estimation only supports vectors of univariate samples.")
if not 1/N <= p <= (N-1)/N:
raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
sorted_sample = sample if sorted else sample.sort().values
    if type == 6:
        h = (N+1)*p
    elif type == 7:
        h = (N-1)*p + 1
    elif type == 8:
        h = (N+1/3)*p + 1/3
    else:
        raise ValueError(f"type must be 6, 7 or 8 (got {type}).")
    h_floor = int(h)
quantile = sorted_sample[h_floor-1]
if h_floor != h:
quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
return float(quantile)
def get_device(device: Optional[Union[str, torch.device]] = None) -> torch.device:
"""
Instantiates a PyTorch device object.
Parameters
----------
device
        Either `None`, a str ('cpu', 'gpu' or 'cuda') indicating the device to choose, or an already instantiated device
object. If `None`, the GPU is selected if it is detected, otherwise the CPU is used as a fallback.
Returns
-------
The instantiated device object.
"""
if isinstance(device, torch.device): # Already a torch device
return device
else: # Instantiate device
if device is None or device.lower() in ['gpu', 'cuda']:
torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if torch_device.type == 'cpu':
logger.warning('No GPU detected, fall back on CPU.')
else:
torch_device = torch.device('cpu')
if device.lower() != 'cpu':
logger.warning('Requested device not recognised, fall back on CPU.')
return torch_device
def get_optimizer(name: str = 'Adam') -> Type[torch.optim.Optimizer]:
"""
Get an optimizer class from its name.
Parameters
----------
name
Name of the optimizer.
Returns
-------
The optimizer class.
"""
optimizer = getattr(torch.optim, name, None)
if optimizer is None:
raise NotImplementedError(f"Optimizer {name} not implemented.")
return optimizer
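if __name__ == '__main__':
    # Quick sketch (illustrative only) of the helpers above on toy inputs.
    print(zero_diag(torch.ones(3, 3)))            # ones with a zeroed diagonal
    sample = torch.arange(100, dtype=torch.float32)
    print(quantile(sample, 0.9))                  # ~ the 90th percentile (89.1)
    print(get_device('cpu'))                      # torch.device('cpu')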
| 3,133 | 26.017241 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/data.py
|
import numpy as np
import torch
from typing import Tuple, Union
Indexable = Union[np.ndarray, torch.Tensor, list]
class TorchDataset(torch.utils.data.Dataset):
def __init__(self, *indexables: Union[Tuple[Indexable, ...], Indexable]) -> None:
self.indexables = indexables
def __getitem__(self, idx: int) -> Union[Tuple[Indexable, ...], Indexable]:
output = tuple(indexable[idx] for indexable in self.indexables)
return output if len(output) > 1 else output[0]
def __len__(self) -> int:
return len(self.indexables[0])
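if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): wrap arrays in a TorchDataset
    # and iterate with a standard DataLoader. The shapes below are assumptions.
    x, y = np.random.randn(100, 5), np.random.randn(100)
    loader = torch.utils.data.DataLoader(TorchDataset(x, y), batch_size=32)
    x_batch, y_batch = next(iter(loader))
    print(x_batch.shape, y_batch.shape)  # torch.Size([32, 5]), torch.Size([32])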
| 565 | 30.444444 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
TorchDataset = import_optional(
'alibi_detect.utils.pytorch.data',
names=['TorchDataset']
)
mmd2, mmd2_from_kernel_matrix, squared_pairwise_distance, permed_lsdds, batch_compute_kernel_matrix = import_optional(
'alibi_detect.utils.pytorch.distance',
names=['mmd2', 'mmd2_from_kernel_matrix', 'squared_pairwise_distance',
'permed_lsdds', 'batch_compute_kernel_matrix']
)
GaussianRBF, DeepKernel = import_optional(
'alibi_detect.utils.pytorch.kernels',
names=['GaussianRBF', 'DeepKernel']
)
predict_batch, predict_batch_transformer = import_optional(
'alibi_detect.utils.pytorch.prediction',
names=['predict_batch', 'predict_batch_transformer']
)
get_device, quantile, zero_diag = import_optional(
'alibi_detect.utils.pytorch.misc',
names=['get_device', 'quantile', 'zero_diag']
)
__all__ = [
"batch_compute_kernel_matrix",
"mmd2",
"mmd2_from_kernel_matrix",
"squared_pairwise_distance",
"GaussianRBF",
"DeepKernel",
"permed_lsdds",
"predict_batch",
"predict_batch_transformer",
"get_device",
"quantile",
"zero_diag",
"TorchDataset"
]
| 1,218 | 26.088889 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/distance.py
|
import logging
import torch
from torch import nn
import numpy as np
from typing import Callable, List, Tuple, Optional, Union
logger = logging.getLogger(__name__)
@torch.jit.script
def squared_pairwise_distance(x: torch.Tensor, y: torch.Tensor, a_min: float = 1e-30) -> torch.Tensor:
"""
PyTorch pairwise squared Euclidean distance between samples x and y.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
a_min
Lower bound to clip distance values.
Returns
-------
Pairwise squared Euclidean distance [Nx, Ny].
"""
x2 = x.pow(2).sum(dim=-1, keepdim=True)
y2 = y.pow(2).sum(dim=-1, keepdim=True)
dist = torch.addmm(y2.transpose(-2, -1), x, y.transpose(-2, -1), alpha=-2).add_(x2)
return dist.clamp_min_(a_min)
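# Example (illustrative sketch): the result matches the squared Euclidean
# distances from torch.cdist, up to the clipping floor `a_min`.
#
#   x, y = torch.randn(4, 3), torch.randn(5, 3)
#   assert torch.allclose(squared_pairwise_distance(x, y),
#                         torch.cdist(x, y) ** 2, atol=1e-5)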
def batch_compute_kernel_matrix(
x: Union[list, np.ndarray, torch.Tensor],
y: Union[list, np.ndarray, torch.Tensor],
kernel: Union[nn.Module, nn.Sequential],
device: torch.device = None,
batch_size: int = int(1e10),
preprocess_fn: Callable[..., torch.Tensor] = None,
) -> torch.Tensor:
"""
Compute the kernel matrix between x and y by filling in blocks of size
batch_size x batch_size at a time.
Parameters
----------
x
Reference set.
y
Test set.
kernel
PyTorch module.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
batch_size
Batch size used during prediction.
preprocess_fn
Optional preprocessing function for each batch.
Returns
-------
Kernel matrix in the form of a torch tensor
"""
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if type(x) != type(y):
raise ValueError("x and y should be of the same type")
if isinstance(x, np.ndarray):
x, y = torch.from_numpy(x), torch.from_numpy(y)
n_x, n_y = len(x), len(y)
n_batch_x, n_batch_y = int(np.ceil(n_x / batch_size)), int(np.ceil(n_y / batch_size))
with torch.no_grad():
k_is: List[torch.Tensor] = []
for i in range(n_batch_x):
istart, istop = i * batch_size, min((i + 1) * batch_size, n_x)
x_batch = x[istart:istop]
if preprocess_fn is not None:
x_batch = preprocess_fn(x_batch)
x_batch = x_batch.to(device) # type: ignore
k_ijs: List[torch.Tensor] = []
for j in range(n_batch_y):
jstart, jstop = j * batch_size, min((j + 1) * batch_size, n_y)
y_batch = y[jstart:jstop]
if preprocess_fn is not None:
y_batch = preprocess_fn(y_batch)
y_batch = y_batch.to(device) # type: ignore
k_ijs.append(kernel(x_batch, y_batch).cpu())
k_is.append(torch.cat(k_ijs, 1))
k_mat = torch.cat(k_is, 0)
return k_mat
def mmd2_from_kernel_matrix(kernel_mat: torch.Tensor, m: int, permute: bool = False,
zero_diag: bool = True) -> torch.Tensor:
"""
Compute maximum mean discrepancy (MMD^2) between 2 samples x and y from the
full kernel matrix between the samples.
Parameters
----------
kernel_mat
Kernel matrix between samples x and y.
m
Number of instances in y.
permute
Whether to permute the row indices. Used for permutation tests.
zero_diag
Whether to zero out the diagonal of the kernel matrix.
Returns
-------
MMD^2 between the samples from the kernel matrix.
"""
n = kernel_mat.shape[0] - m
if zero_diag:
kernel_mat = kernel_mat - torch.diag(kernel_mat.diag())
if permute:
idx = torch.randperm(kernel_mat.shape[0])
kernel_mat = kernel_mat[idx][:, idx]
k_xx, k_yy, k_xy = kernel_mat[:-m, :-m], kernel_mat[-m:, -m:], kernel_mat[-m:, :-m]
c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
mmd2 = c_xx * k_xx.sum() + c_yy * k_yy.sum() - 2. * k_xy.mean()
return mmd2
def mmd2(x: torch.Tensor, y: torch.Tensor, kernel: Callable) -> float:
"""
Compute MMD^2 between 2 samples.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
kernel
Kernel function.
Returns
-------
MMD^2 between the samples x and y.
"""
n, m = x.shape[0], y.shape[0]
c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
k_xx, k_yy, k_xy = kernel(x, x), kernel(y, y), kernel(x, y)
return c_xx * (k_xx.sum() - k_xx.trace()) + c_yy * (k_yy.sum() - k_yy.trace()) - 2. * k_xy.mean()
def permed_lsdds(
k_all_c: torch.Tensor,
x_perms: List[torch.Tensor],
y_perms: List[torch.Tensor],
H: torch.Tensor,
H_lam_inv: Optional[torch.Tensor] = None,
lam_rd_max: float = 0.2,
return_unpermed: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Compute LSDD estimates from kernel matrix across various ref and test window samples
Parameters
----------
k_all_c
Kernel matrix of similarities between all samples and the kernel centers.
x_perms
List of B reference window index vectors
y_perms
List of B test window index vectors
H
Special (scaled) kernel matrix of similarities between kernel centers
H_lam_inv
        Function of H corresponding to a particular regularization parameter lambda.
See Eqn 11 of Bu et al. (2017)
lam_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
        lambda is allowed to cause. Defaults to 0.2. Only relevant if H_lam_inv is not supplied.
return_unpermed
Whether or not to return value corresponding to unpermed order defined by k_all_c
Returns
-------
Vector of B LSDD estimates for each permutation, H_lam_inv which may have been inferred, and optionally \
the unpermed LSDD estimate.
"""
# Compute (for each bootstrap) the average distance to each kernel center (Eqn 7)
k_xc_perms = torch.stack([k_all_c[x_inds] for x_inds in x_perms], 0)
k_yc_perms = torch.stack([k_all_c[y_inds] for y_inds in y_perms], 0)
h_perms = k_xc_perms.mean(1) - k_yc_perms.mean(1)
if H_lam_inv is None:
# We perform the initialisation for multiple candidate lambda values and pick the largest
        # one for which the relative difference (RD) between two estimates of LSDD is below lam_rd_max.
# See Appendix A
candidate_lambdas = [1/(4**i) for i in range(10)] # TODO: More principled selection
H_plus_lams = torch.stack(
[H+torch.eye(H.shape[0], device=H.device)*can_lam for can_lam in candidate_lambdas], 0
)
H_plus_lam_invs = torch.inverse(H_plus_lams)
H_plus_lam_invs = H_plus_lam_invs.permute(1, 2, 0) # put lambdas in final axis
omegas = torch.einsum('jkl,bk->bjl', H_plus_lam_invs, h_perms) # (Eqn 8)
h_omegas = torch.einsum('bj,bjl->bl', h_perms, omegas)
omega_H_omegas = torch.einsum('bkl,bkl->bl', torch.einsum('bjl,jk->bkl', omegas, H), omegas)
rds = (1 - (omega_H_omegas/h_omegas)).mean(0)
less_than_rd_inds = (rds < lam_rd_max).nonzero()
if len(less_than_rd_inds) == 0:
repeats = k_all_c.shape[0] - torch.unique(k_all_c, dim=0).shape[0]
if repeats > 0:
msg = "Too many repeat instances for LSDD-based detection. \
Try using MMD-based detection instead"
else:
msg = "Unknown error. Try using MMD-based detection instead"
raise ValueError(msg)
lam_index = less_than_rd_inds[0]
lam = candidate_lambdas[lam_index]
logger.info(f"Using lambda value of {lam:.2g} with RD of {float(rds[lam_index]):.2g}")
H_plus_lam_inv = H_plus_lam_invs[:, :, lam_index.item()]
H_lam_inv = 2*H_plus_lam_inv - (H_plus_lam_inv.transpose(0, 1) @ H @ H_plus_lam_inv) # (below Eqn 11)
# Now to compute an LSDD estimate for each permutation
lsdd_perms = (h_perms * (H_lam_inv @ h_perms.transpose(0, 1)).transpose(0, 1)).sum(-1) # (Eqn 11)
if return_unpermed:
n_x = x_perms[0].shape[0]
h = k_all_c[:n_x].mean(0) - k_all_c[n_x:].mean(0)
lsdd_unpermed = (h[None, :] * (H_lam_inv @ h[:, None]).transpose(0, 1)).sum()
return lsdd_perms, H_lam_inv, lsdd_unpermed
else:
return lsdd_perms, H_lam_inv
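# Example usage (illustrative sketch mirroring the unit tests; assumes
# GaussianRBF from alibi_detect.utils.pytorch, with kernel centers `kcs` that
# would normally be subsampled from the reference data):
#
# from alibi_detect.utils.pytorch import GaussianRBF
# n, m, d, B, n_kcs = 50, 50, 3, 100, 10
# kcs = torch.randn(n_kcs, d)
# x_full = torch.cat([torch.randn(n, d), 1. + torch.randn(m, d)])
# sigma = torch.tensor((1.,))
# k_all_c = GaussianRBF(sigma)(x_full, kcs)
# H = GaussianRBF(np.sqrt(2.) * sigma)(kcs, kcs)
# perms = [torch.randperm(n + m) for _ in range(B)]
# lsdd_perms, H_lam_inv, lsdd_unpermed = permed_lsdds(
#     k_all_c, [p[:n] for p in perms], [p[n:] for p in perms], H, return_unpermed=True
# )
# p_val = (lsdd_perms >= lsdd_unpermed).float().mean()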
| 8,767 | 36.470085 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/tests/test_data_pt.py
|
import numpy as np
import pytest
from alibi_detect.utils.pytorch.data import TorchDataset
# test on numpy array and list
n, f = 100, 5
shape = (n, f)
tests_ds = [list, np.ndarray]
n_tests_ds = len(tests_ds)
@pytest.fixture
def ds_params(request):
return tests_ds[request.param]
@pytest.mark.parametrize('ds_params', list(range(n_tests_ds)), indirect=True)
def test_torchdataset(ds_params):
xtype = ds_params
x = np.random.randn(*shape)
y = np.random.randn(*(n,))
if xtype == list:
x = list(x)
ds = TorchDataset(x, y)
for step, data in enumerate(ds):
pass
assert data[0].shape == (f,) and data[1].shape == ()
assert step == len(ds) - 1
| 692 | 22.896552 | 77 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/tests/test_distance_pt.py
|
import numpy as np
from itertools import product
import pytest
import torch
from alibi_detect.utils.pytorch import GaussianRBF, mmd2, mmd2_from_kernel_matrix, permed_lsdds
from alibi_detect.utils.pytorch import squared_pairwise_distance, batch_compute_kernel_matrix
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
tests_pairwise = list(product(n_features, n_instances))
n_tests_pairwise = len(tests_pairwise)
@pytest.fixture
def pairwise_params(request):
return tests_pairwise[request.param]
@pytest.mark.parametrize('pairwise_params', list(range(n_tests_pairwise)), indirect=True)
def test_pairwise(pairwise_params):
n_features, n_instances = pairwise_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = torch.from_numpy(np.random.random(xshape).astype('float32'))
y = torch.from_numpy(np.random.random(yshape).astype('float32'))
dist_xx = squared_pairwise_distance(x, x).numpy()
dist_xy = squared_pairwise_distance(x, y).numpy()
assert dist_xx.shape == (xshape[0], xshape[0])
assert dist_xy.shape == n_instances
np.testing.assert_almost_equal(dist_xx.trace(), 0., decimal=5)
tests_mmd = tests_pairwise
n_tests_mmd = n_tests_pairwise
@pytest.fixture
def mmd_params(request):
return tests_mmd[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests_mmd)), indirect=True)
def test_mmd(mmd_params):
n_features, n_instances = mmd_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = torch.from_numpy(np.random.random(xshape).astype('float32'))
y = torch.from_numpy(np.random.random(yshape).astype('float32'))
mmd_xx = mmd2(x, x, kernel=GaussianRBF(sigma=torch.ones(1)))
mmd_xy = mmd2(x, y, kernel=GaussianRBF(sigma=torch.ones(1)))
assert mmd_xy > mmd_xx
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
batch_size = [1, 5]
tests_bckm = list(product(n_features, n_instances, batch_size))
n_tests_bckm = len(tests_bckm)
@pytest.fixture
def bckm_params(request):
return tests_bckm[request.param]
@pytest.mark.parametrize('bckm_params', list(range(n_tests_bckm)), indirect=True)
def test_bckm(bckm_params):
n_features, n_instances, batch_size = bckm_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = torch.from_numpy(np.random.random(xshape).astype('float32'))
y = torch.from_numpy(np.random.random(yshape).astype('float32'))
kernel = GaussianRBF(sigma=torch.tensor(1.))
kernel_mat = kernel(x, y).detach().numpy()
bc_kernel_mat = batch_compute_kernel_matrix(x, y, kernel, batch_size=batch_size).detach().numpy()
np.testing.assert_almost_equal(kernel_mat, bc_kernel_mat, decimal=6)
n = [10, 100]
m = [10, 100]
permute = [True, False]
zero_diag = [True, False]
tests_mmd_from_kernel_matrix = list(product(n, m, permute, zero_diag))
n_tests_mmd_from_kernel_matrix = len(tests_mmd_from_kernel_matrix)
@pytest.fixture
def mmd_from_kernel_matrix_params(request):
return tests_mmd_from_kernel_matrix[request.param]
@pytest.mark.parametrize('mmd_from_kernel_matrix_params',
list(range(n_tests_mmd_from_kernel_matrix)), indirect=True)
def test_mmd_from_kernel_matrix(mmd_from_kernel_matrix_params):
n, m, permute, zero_diag = mmd_from_kernel_matrix_params
n_tot = n + m
shape = (n_tot, n_tot)
kernel_mat = np.random.uniform(0, 1, size=shape)
kernel_mat_2 = kernel_mat.copy()
kernel_mat_2[-m:, :-m] = 1.
kernel_mat_2[:-m, -m:] = 1.
kernel_mat = torch.from_numpy(kernel_mat)
kernel_mat_2 = torch.from_numpy(kernel_mat_2)
if not zero_diag:
kernel_mat -= torch.diag(kernel_mat.diag())
kernel_mat_2 -= torch.diag(kernel_mat_2.diag())
mmd = mmd2_from_kernel_matrix(kernel_mat, m, permute=permute, zero_diag=zero_diag)
mmd_2 = mmd2_from_kernel_matrix(kernel_mat_2, m, permute=permute, zero_diag=zero_diag)
if not permute:
assert mmd_2.numpy() < mmd.numpy()
n = [10]
m = [10]
d = [3]
B = [20]
n_kcs = [5]
tests_permed_lsdds = list(product(n, m, d, B, n_kcs))
n_tests_permed_lsdds = len(tests_permed_lsdds)
@pytest.fixture
def permed_lsdds_params(request):
return tests_permed_lsdds[request.param]
@pytest.mark.parametrize('permed_lsdds_params',
list(range(n_tests_permed_lsdds)), indirect=True)
def test_permed_lsdds(permed_lsdds_params):
n, m, d, B, n_kcs = permed_lsdds_params
kcs = torch.randn(n_kcs, d)
x_ref = torch.randn(n, d)
x_cur = 10 + 0.2*torch.randn(m, d)
x_full = torch.cat([x_ref, x_cur], axis=0)
sigma = torch.tensor((1.,))
k_all_c = GaussianRBF(sigma)(x_full, kcs)
H = GaussianRBF(np.sqrt(2.)*sigma)(kcs, kcs)
perms = [torch.randperm(n+m) for _ in range(B)]
x_perms = [perm[:n] for perm in perms]
y_perms = [perm[n:] for perm in perms]
lsdd_perms, H_lam_inv, lsdd_unpermed = permed_lsdds(
k_all_c, x_perms, y_perms, H, return_unpermed=True
)
assert int((lsdd_perms > lsdd_unpermed).sum()) == 0
assert H_lam_inv.shape == (n_kcs, n_kcs)
| 5,181 | 32.869281 | 101 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/tests/test_misc_pt.py
|
from itertools import product
import pytest
import torch
import numpy as np
from alibi_detect.utils.pytorch import zero_diag, quantile
def test_zero_diag():
ones = torch.ones(10, 10)
ones_zd = zero_diag(ones)
assert ones_zd.shape == (10, 10)
assert float(ones_zd.trace()) == 0
assert float(ones_zd.sum()) == 90
type = [6, 7, 8]
sorted = [True, False]
tests_quantile = list(product(type, sorted))
n_tests_quantile = len(tests_quantile)
@pytest.fixture
def quantile_params(request):
return tests_quantile[request.param]
@pytest.mark.parametrize('quantile_params', list(range(n_tests_quantile)), indirect=True)
def test_quantile(quantile_params):
type, sorted = quantile_params
sample = (0.5+torch.arange(1e6))/1e6
if not sorted:
sample = sample[torch.randperm(len(sample))]
np.testing.assert_almost_equal(quantile(sample, 0.001, type=type, sorted=sorted), 0.001, decimal=6)
np.testing.assert_almost_equal(quantile(sample, 0.999, type=type, sorted=sorted), 0.999, decimal=6)
assert quantile(torch.ones(100), 0.42, type=type, sorted=sorted) == 1
with pytest.raises(ValueError):
quantile(torch.ones(10), 0.999, type=type, sorted=sorted)
with pytest.raises(ValueError):
quantile(torch.ones(100, 100), 0.5, type=type, sorted=sorted)
| 1,317 | 29.651163 | 103 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/tests/test_prediction_pt.py
|
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Tuple, Union
from alibi_detect.utils.pytorch import predict_batch
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
x = np.zeros((n, n_features), dtype=np.float32)
class MyModel(nn.Module):
def __init__(self, multi_out: bool = False):
super(MyModel, self).__init__()
self.dense = nn.Linear(n_features, n_classes)
self.multi_out = multi_out
def forward(self, x: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
out = self.dense(x)
if not self.multi_out:
return out
else:
return out, out
AutoEncoder = nn.Sequential(
nn.Linear(n_features, latent_dim),
nn.Linear(latent_dim, n_features)
)
def id_fn(x: Union[np.ndarray, torch.Tensor, list]) -> Union[np.ndarray, torch.Tensor]:
if isinstance(x, list):
return torch.from_numpy(np.concatenate(x, axis=0))
else:
return x
# model, batch size, dtype, preprocessing function, list as input
tests_predict = [
(MyModel(multi_out=False), 2, np.float32, None, False),
(MyModel(multi_out=False), int(1e10), np.float32, None, False),
(MyModel(multi_out=False), int(1e10), torch.float32, None, False),
(MyModel(multi_out=True), int(1e10), torch.float32, None, False),
(MyModel(multi_out=False), int(1e10), np.float32, id_fn, False),
(AutoEncoder, 2, np.float32, None, False),
(AutoEncoder, int(1e10), np.float32, None, False),
(AutoEncoder, int(1e10), torch.float32, None, False),
(id_fn, 2, np.float32, None, False),
(id_fn, 2, torch.float32, None, False),
(id_fn, 2, np.float32, id_fn, True),
]
n_tests = len(tests_predict)
@pytest.fixture
def predict_batch_params(request):
return tests_predict[request.param]
@pytest.mark.parametrize('predict_batch_params', list(range(n_tests)), indirect=True)
def test_predict_batch(predict_batch_params):
model, batch_size, dtype, preprocess_fn, to_list = predict_batch_params
x_batch = [x] if to_list else x
preds = predict_batch(x_batch, model, batch_size=batch_size, preprocess_fn=preprocess_fn, dtype=dtype)
if isinstance(preds, tuple):
preds = preds[0]
assert preds.dtype == dtype
    if isinstance(model, nn.Sequential) or (hasattr(model, '__name__') and model.__name__ == 'id_fn'):
assert preds.shape == x.shape
elif isinstance(model, nn.Module):
assert preds.shape == (n, n_classes)
| 2,498 | 33.232877 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/pytorch/tests/test_kernels_pt.py
|
from itertools import product
import numpy as np
import pytest
import torch
from torch import nn
from alibi_detect.utils.pytorch import GaussianRBF, DeepKernel
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, trainable))
n_tests_gk = len(tests_gk)
@pytest.fixture
def gaussian_kernel_params(request):
return tests_gk[request.param]
@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
sigma, n_features, n_instances, trainable = gaussian_kernel_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
sigma = sigma if sigma is None else torch.from_numpy(sigma)
x = torch.from_numpy(np.random.random(xshape)).float()
y = torch.from_numpy(np.random.random(yshape)).float()
kernel = GaussianRBF(sigma=sigma, trainable=trainable)
infer_sigma = True if sigma is None else False
if trainable and infer_sigma:
with pytest.raises(Exception):
kernel(x, y, infer_sigma=infer_sigma)
else:
k_xy = kernel(x, y, infer_sigma=infer_sigma).detach().numpy()
k_xx = kernel(x, x, infer_sigma=infer_sigma).detach().numpy()
assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
np.testing.assert_almost_equal(k_xx.trace(), xshape[0], decimal=4)
assert (k_xx > 0.).all() and (k_xy > 0.).all()
class MyKernel(nn.Module): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.linear = nn.Linear(n_features, 20)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return torch.einsum('ji,ki->jk', self.linear(x), self.linear(y))
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
kernel_a = [GaussianRBF(trainable=True), MyKernel]
kernel_b = [GaussianRBF(trainable=True), MyKernel, None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)
@pytest.fixture
def deep_kernel_params(request):
return tests_dk[request.param]
@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
x = torch.as_tensor(np.random.random(xshape).astype('float32'))
y = torch.as_tensor(np.random.random(yshape).astype('float32'))
proj = nn.Linear(n_features, n_features)
kernel_a = kernel_a(n_features) if kernel_a == MyKernel else kernel_a
kernel_b = kernel_b(n_features) if kernel_b == MyKernel else kernel_b
kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
k_xy = kernel(x, y).detach().numpy()
k_yx = kernel(y, x).detach().numpy()
k_xx = kernel(x, x).detach().numpy()
assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
assert (np.diag(k_xx) > 0.).all()
np.testing.assert_almost_equal(k_xy, np.transpose(k_yx), decimal=5)
| 3,294 | 38.22619 | 90 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/kernels.py
|
import tensorflow as tf
import numpy as np
from . import distance
from typing import Optional, Union, Callable
from scipy.special import logit
from alibi_detect.utils.frameworks import Framework
def sigma_median(x: tf.Tensor, y: tf.Tensor, dist: tf.Tensor) -> tf.Tensor:
"""
Bandwidth estimation using the median heuristic :cite:t:`Gretton2012`.
Parameters
----------
x
Tensor of instances with dimension [Nx, features].
y
Tensor of instances with dimension [Ny, features].
dist
Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.
Returns
-------
The computed bandwidth, `sigma`.
"""
n = min(x.shape[0], y.shape[0])
n = n if tf.reduce_all(x[:n] == y[:n]) and x.shape == y.shape else 0
n_median = n + (tf.math.reduce_prod(dist.shape) - n) // 2 - 1
sigma = tf.expand_dims((.5 * tf.sort(tf.reshape(dist, (-1,)))[n_median]) ** .5, axis=0)
return sigma
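# Example usage (illustrative sketch; a standalone call with the module's own
# squared_pairwise_distance providing the distances):
#
# x, y = tf.random.normal((100, 5)), tf.random.normal((80, 5))
# dist = distance.squared_pairwise_distance(x, y)
# sigma = sigma_median(x, y, dist)  # tensor of shape [1]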
class GaussianRBF(tf.keras.Model):
def __init__(
self,
sigma: Optional[tf.Tensor] = None,
init_sigma_fn: Optional[Callable] = None,
trainable: bool = False
) -> None:
"""
        Gaussian RBF kernel: k(x,y) = exp(-(1/(2*sigma^2))||x-y||^2). A forward pass takes
a batch of instances x [Nx, features] and y [Ny, features] and returns the kernel
matrix [Nx, Ny].
Parameters
----------
sigma
Bandwidth used for the kernel. Needn't be specified if being inferred or trained.
            Can pass multiple values to evaluate the kernel with; the resulting kernel matrices are then averaged.
init_sigma_fn
Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred.
The function's signature should match :py:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`,
meaning that it should take in the tensors `x`, `y` and `dist` and return `sigma`. If `None`, it is set to
:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`.
trainable
Whether or not to track gradients w.r.t. sigma to allow it to be trained.
"""
super().__init__()
init_sigma_fn = sigma_median if init_sigma_fn is None else init_sigma_fn
self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
if sigma is None:
self.log_sigma = tf.Variable(np.empty(1), dtype=tf.keras.backend.floatx(), trainable=trainable)
self.init_required = True
else:
sigma = tf.cast(tf.reshape(sigma, (-1,)), dtype=tf.keras.backend.floatx()) # [Ns,]
self.log_sigma = tf.Variable(tf.math.log(sigma), trainable=trainable)
self.init_required = False
self.init_sigma_fn = init_sigma_fn
self.trainable = trainable
@property
def sigma(self) -> tf.Tensor:
return tf.math.exp(self.log_sigma)
def call(self, x: tf.Tensor, y: tf.Tensor, infer_sigma: bool = False) -> tf.Tensor:
y = tf.cast(y, x.dtype)
x, y = tf.reshape(x, (x.shape[0], -1)), tf.reshape(y, (y.shape[0], -1)) # flatten
dist = distance.squared_pairwise_distance(x, y) # [Nx, Ny]
if infer_sigma or self.init_required:
if self.trainable and infer_sigma:
raise ValueError("Gradients cannot be computed w.r.t. an inferred sigma value")
sigma = self.init_sigma_fn(x, y, dist)
self.log_sigma.assign(tf.math.log(sigma))
self.init_required = False
gamma = tf.constant(1. / (2. * self.sigma ** 2), dtype=x.dtype) # [Ns,]
# TODO: do matrix multiplication after all?
kernel_mat = tf.exp(- tf.concat([(g * dist)[None, :, :] for g in gamma], axis=0)) # [Ns, Nx, Ny]
return tf.reduce_mean(kernel_mat, axis=0) # [Nx, Ny]
def get_config(self) -> dict:
"""
        Returns a serializable config dict (excluding the init_sigma_fn, which is serialized in alibi_detect.saving).
"""
cfg = self.config.copy()
if isinstance(cfg['sigma'], tf.Tensor):
cfg['sigma'] = cfg['sigma'].numpy().tolist()
cfg.update({'flavour': Framework.TENSORFLOW.value})
return cfg
@classmethod
def from_config(cls, config):
"""
Instantiates a kernel from a config dictionary.
Parameters
----------
config
A kernel config dictionary.
"""
config.pop('flavour')
return cls(**config)
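# Example usage (illustrative sketch):
#
# x, y = tf.random.normal((100, 5)), tf.random.normal((80, 5))
# kernel = GaussianRBF()                 # bandwidth not yet set
# k_xy = kernel(x, y, infer_sigma=True)  # [100, 80]; sigma set via median heuristic
# kernel_fixed = GaussianRBF(sigma=tf.constant([1.]))  # explicit bandwidth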
class DeepKernel(tf.keras.Model):
"""
Computes similarities as k(x,y) = (1-eps)*k_a(proj(x), proj(y)) + eps*k_b(x,y).
A forward pass takes a batch of instances x [Nx, features] and y [Ny, features] and returns
the kernel matrix [Nx, Ny].
Parameters
----------
proj
The projection to be applied to the inputs before applying kernel_a
kernel_a
The kernel to apply to the projected inputs. Defaults to a Gaussian RBF with trainable bandwidth.
kernel_b
The kernel to apply to the raw inputs. Defaults to a Gaussian RBF with trainable bandwidth.
Set to None in order to use only the deep component (i.e. eps=0).
eps
The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be
        either specified or set to 'trainable'. Only relevant if kernel_b is not None.
"""
def __init__(
self,
proj: tf.keras.Model,
kernel_a: Union[tf.keras.Model, str] = 'rbf',
kernel_b: Optional[Union[tf.keras.Model, str]] = 'rbf',
eps: Union[float, str] = 'trainable'
) -> None:
super().__init__()
self.config = {'proj': proj, 'kernel_a': kernel_a, 'kernel_b': kernel_b, 'eps': eps}
if kernel_a == 'rbf':
kernel_a = GaussianRBF(trainable=True)
if kernel_b == 'rbf':
kernel_b = GaussianRBF(trainable=True)
self.kernel_a = kernel_a
self.kernel_b = kernel_b
self.proj = proj
if kernel_b is not None:
self._init_eps(eps)
def _init_eps(self, eps: Union[float, str]) -> None:
if isinstance(eps, float):
if not 0 < eps < 1:
raise ValueError("eps should be in (0,1)")
eps = tf.constant(eps)
self.logit_eps = tf.Variable(tf.constant(logit(eps)), trainable=False)
elif eps == 'trainable':
self.logit_eps = tf.Variable(tf.constant(0.))
else:
raise NotImplementedError("eps should be 'trainable' or a float in (0,1)")
@property
def eps(self) -> tf.Tensor:
return tf.math.sigmoid(self.logit_eps) if self.kernel_b is not None else tf.constant(0.)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
similarity = self.kernel_a(self.proj(x), self.proj(y)) # type: ignore[operator]
if self.kernel_b is not None:
similarity = (1-self.eps)*similarity + self.eps*self.kernel_b(x, y) # type: ignore[operator]
return similarity
def get_config(self) -> dict:
return self.config.copy()
@classmethod
def from_config(cls, config):
return cls(**config)
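# Example usage (illustrative sketch; the projection is a placeholder
# architecture):
#
# proj = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=(5,)),
#                             tf.keras.layers.Dense(2)])
# kernel = DeepKernel(proj, eps=.01)
# x, y = tf.random.normal((100, 5)), tf.random.normal((80, 5))
# k_xy = kernel(x, y)  # [100, 80]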
| 7,226 | 38.708791 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/prediction.py
|
from functools import partial
from typing import Callable, Type, Union
import numpy as np
import tensorflow as tf
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, tf.Tensor], model: Union[Callable, tf.keras.Model],
batch_size: int = int(1e10), preprocess_fn: Callable = None,
dtype: Union[Type[np.generic], tf.DType] = np.float32) -> Union[np.ndarray, tf.Tensor, tuple]:
"""
Make batch predictions on a model.
Parameters
----------
x
Batch of instances.
model
tf.keras model or one of the other permitted types defined in Data.
batch_size
Batch size used during prediction.
preprocess_fn
Optional preprocessing function for each batch.
dtype
Model output type, e.g. np.float32 or tf.float32.
Returns
-------
Numpy array, tensorflow tensor or tuples of those with model outputs.
"""
n = len(x)
n_minibatch = int(np.ceil(n / batch_size))
return_np = not isinstance(dtype, tf.DType)
return_list = False
preds: Union[list, tuple] = []
for i in range(n_minibatch):
istart, istop = i * batch_size, min((i + 1) * batch_size, n)
x_batch = x[istart:istop]
if isinstance(preprocess_fn, Callable): # type: ignore
x_batch = preprocess_fn(x_batch)
preds_tmp = model(x_batch)
if isinstance(preds_tmp, (list, tuple)):
if len(preds) == 0: # init tuple with lists to store predictions
preds = tuple([] for _ in range(len(preds_tmp)))
return_list = isinstance(preds_tmp, list)
for j, p in enumerate(preds_tmp):
preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
elif isinstance(preds_tmp, (np.ndarray, tf.Tensor)):
preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray) # type: ignore
else preds_tmp.numpy())
else:
raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
f'type needs to be one of list, tuple, np.ndarray or tf.Tensor.')
concat = np.concatenate if return_np else tf.concat
out = tuple(concat(p, axis=0) for p in preds) if isinstance(preds, tuple) else concat(preds, axis=0)
if return_list:
out = list(out)
return out
def predict_batch_transformer(x: Union[list, np.ndarray], model: tf.keras.Model, tokenizer: Callable,
max_len: int, batch_size: int = int(1e10),
dtype: Union[Type[np.generic], tf.DType] = np.float32) \
-> Union[np.ndarray, tf.Tensor]:
"""
Make batch predictions using a transformers tokenizer and model.
Parameters
----------
x
Batch of instances.
model
Transformer model.
tokenizer
Tokenizer for model.
max_len
Max token length.
batch_size
Batch size.
dtype
Model output type, e.g. np.float32 or tf.float32.
Returns
-------
Numpy array or tensorflow tensor with model outputs.
"""
preprocess_fn = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='tf')
return predict_batch(x, model, preprocess_fn=preprocess_fn, batch_size=batch_size, dtype=dtype)
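# Example usage (illustrative sketch; the model is a placeholder):
#
# model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
# x = np.random.randn(1000, 5).astype(np.float32)
# preds_np = predict_batch(x, model, batch_size=32)                    # np.ndarray
# preds_tf = predict_batch(x, model, batch_size=32, dtype=tf.float32)  # tf.Tensor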
| 3,418 | 36.988889 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/perturbation.py
|
import numpy as np
import tensorflow as tf
def mutate_categorical(X: np.ndarray,
rate: float = None,
seed: int = 0,
feature_range: tuple = (0, 255)) -> tf.Tensor:
"""
Randomly change integer feature values to values within a set range
with a specified permutation rate.
Parameters
----------
X
Batch of data to be perturbed.
rate
Permutation rate (between 0 and 1).
seed
Random seed.
feature_range
Min and max range for perturbed features.
Returns
-------
Array with perturbed data.
"""
frange = (feature_range[0] + 1, feature_range[1] + 1)
shape = X.shape
n_samples = np.prod(shape)
mask = tf.random.categorical(
tf.math.log([[1. - rate, rate]]),
n_samples,
seed=seed,
dtype=tf.int32
)
mask = tf.reshape(mask, shape)
possible_mutations = tf.random.uniform(
shape,
minval=frange[0],
maxval=frange[1],
dtype=tf.int32,
seed=seed + 1
)
X = tf.math.floormod(tf.cast(X, tf.int32) + mask * possible_mutations, frange[1])
return tf.cast(X, tf.float32)
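# Example usage (illustrative sketch): perturb 5% of the pixels of an
# integer-valued image batch.
#
# X = np.random.randint(0, 256, size=(32, 28, 28))
# X_pert = mutate_categorical(X, rate=.05, feature_range=(0, 255))  # float32 tensor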
| 1,214 | 24.851064 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/misc.py
|
import tensorflow as tf
def zero_diag(mat: tf.Tensor) -> tf.Tensor:
"""
Set the diagonal of a matrix to 0
Parameters
----------
mat
A 2D square matrix
Returns
-------
A 2D square matrix with zeros along the diagonal
"""
return mat - tf.linalg.diag(tf.linalg.diag_part(mat))
def quantile(sample: tf.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
"""
Estimate a desired quantile of a univariate distribution from a vector of samples
Parameters
----------
sample
A 1D vector of values
p
The desired quantile in (0,1)
type
        The method for computing the quantile (one of 6, 7 or 8).
See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
sorted
Whether or not the vector is already sorted into ascending order
Returns
-------
An estimate of the quantile
"""
N = len(sample)
if len(sample.shape) != 1:
raise ValueError("Quantile estimation only supports vectors of univariate samples.")
if not 1/N <= p <= (N-1)/N:
raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
sorted_sample = sample if sorted else tf.sort(sample)
if type == 6:
h = (N+1)*p
elif type == 7:
h = (N-1)*p + 1
    elif type == 8:
        h = (N+1/3)*p + 1/3
    else:
        raise ValueError("type should be one of 6, 7 or 8.")
h_floor = int(h)
quantile = sorted_sample[h_floor-1]
if h_floor != h:
quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
return float(quantile)
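# Example usage (illustrative sketch):
#
# sample = tf.random.normal((10000,))
# q95 = quantile(sample, .95)  # scalar float; type 7 (linear interpolation) by default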
def subset_matrix(mat: tf.Tensor, inds_0: tf.Tensor, inds_1: tf.Tensor) -> tf.Tensor:
"""
    Take a matrix and return the submatrix corresponding to the provided row and column indices
Parameters
----------
mat
A 2D matrix
inds_0
A vector of row indices
inds_1
A vector of column indices
Returns
-------
A submatrix of shape (len(inds_0), len(inds_1))
"""
if len(mat.shape) != 2:
raise ValueError("Subsetting only supported for matrices (2D)")
subbed_rows = tf.gather(mat, inds_0, axis=0)
subbed_rows_cols = tf.gather(subbed_rows, inds_1, axis=1)
return subbed_rows_cols
def clone_model(model: tf.keras.Model) -> tf.keras.Model:
""" Clone a sequential, functional or subclassed tf.keras.Model. """
try: # sequential or functional model
return tf.keras.models.clone_model(model)
except ValueError: # subclassed model
try:
config = model.get_config()
except NotImplementedError:
config = {}
return model.__class__.from_config(config)
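# Example usage (illustrative sketch):
#
# model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
# model_copy = clone_model(model)  # same architecture, freshly initialised weights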
| 2,661 | 26.163265 | 93 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/data.py
|
import numpy as np
import tensorflow as tf
from typing import Tuple, Union
Indexable = Union[np.ndarray, tf.Tensor, list]
class TFDataset(tf.keras.utils.Sequence):
def __init__(
self, *indexables: Indexable, batch_size: int = int(1e10), shuffle: bool = True,
) -> None:
self.indexables = indexables
self.batch_size = batch_size
self.shuffle = shuffle
def __getitem__(self, idx: int) -> Union[Tuple[Indexable, ...], Indexable]:
istart, istop = idx * self.batch_size, (idx + 1) * self.batch_size
output = tuple(indexable[istart:istop] for indexable in self.indexables)
return output if len(output) > 1 else output[0]
def __len__(self) -> int:
return len(self.indexables[0]) // self.batch_size
def on_epoch_end(self) -> None:
if self.shuffle:
perm = np.random.permutation(len(self.indexables[0]))
self.indexables = tuple(
[indexable[i] for i in perm] if isinstance(indexable, list) else indexable[perm]
for indexable in self.indexables
)
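# Example usage (illustrative sketch): note that __len__ floor-divides, so a
# final partial batch is dropped.
#
# x, y = np.random.randn(100, 5), np.random.randn(100)
# ds = TFDataset(x, y, batch_size=32)
# for x_batch, y_batch in ds:  # 3 batches of 32; the remaining 4 instances are skipped
#     pass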
| 1,105 | 34.677419 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
mmd2, mmd2_from_kernel_matrix, batch_compute_kernel_matrix, relative_euclidean_distance, squared_pairwise_distance, \
permed_lsdds = import_optional(
'alibi_detect.utils.tensorflow.distance',
names=['mmd2', 'mmd2_from_kernel_matrix', 'batch_compute_kernel_matrix', 'relative_euclidean_distance',
'squared_pairwise_distance', 'permed_lsdds']
)
GaussianRBF, DeepKernel = import_optional(
'alibi_detect.utils.tensorflow.kernels',
names=['GaussianRBF', 'DeepKernel']
)
predict_batch, predict_batch_transformer = import_optional(
'alibi_detect.utils.tensorflow.prediction',
names=['predict_batch', 'predict_batch_transformer']
)
zero_diag, quantile, subset_matrix = import_optional(
'alibi_detect.utils.tensorflow.misc',
names=['zero_diag', 'quantile', 'subset_matrix']
)
mutate_categorical = import_optional(
'alibi_detect.utils.tensorflow.perturbation',
names=['mutate_categorical']
)
TFDataset = import_optional(
'alibi_detect.utils.tensorflow.data',
names=['TFDataset']
)
__all__ = [
"batch_compute_kernel_matrix",
"mmd2",
"mmd2_from_kernel_matrix",
"relative_euclidean_distance",
"squared_pairwise_distance",
"GaussianRBF",
"DeepKernel",
"permed_lsdds",
"predict_batch",
"predict_batch_transformer",
"quantile",
"subset_matrix",
"zero_diag",
"mutate_categorical",
"TFDataset"
]
| 1,505 | 24.965517 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/distance.py
|
import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Tuple, List, Optional, Union
logger = logging.getLogger(__name__)
def squared_pairwise_distance(x: tf.Tensor, y: tf.Tensor, a_min: float = 1e-30, a_max: float = 1e30) -> tf.Tensor:
"""
TensorFlow pairwise squared Euclidean distance between samples x and y.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
a_min
Lower bound to clip distance values.
a_max
Upper bound to clip distance values.
Returns
-------
Pairwise squared Euclidean distance [Nx, Ny].
"""
x2 = tf.reduce_sum(x ** 2, axis=-1, keepdims=True)
y2 = tf.reduce_sum(y ** 2, axis=-1, keepdims=True)
dist = x2 + tf.transpose(y2, (1, 0)) - 2. * x @ tf.transpose(y, (1, 0))
return tf.clip_by_value(dist, a_min, a_max)
def batch_compute_kernel_matrix(
x: Union[list, np.ndarray, tf.Tensor],
y: Union[list, np.ndarray, tf.Tensor],
kernel: Union[Callable, tf.keras.Model],
batch_size: int = int(1e10),
preprocess_fn: Callable = None,
) -> tf.Tensor:
"""
Compute the kernel matrix between x and y by filling in blocks of size
batch_size x batch_size at a time.
Parameters
----------
x
Reference set.
y
Test set.
kernel
tf.keras model
batch_size
Batch size used during prediction.
preprocess_fn
Optional preprocessing function for each batch.
Returns
-------
Kernel matrix in the form of a tensorflow tensor
"""
if type(x) != type(y):
raise ValueError("x and y should be of the same type")
n_x, n_y = len(x), len(y)
n_batch_x, n_batch_y = int(np.ceil(n_x / batch_size)), int(np.ceil(n_y / batch_size))
k_is = []
for i in range(n_batch_x):
istart, istop = i * batch_size, min((i + 1) * batch_size, n_x)
x_batch = x[istart:istop]
if isinstance(preprocess_fn, Callable): # type: ignore
x_batch = preprocess_fn(x_batch)
k_ijs = []
for j in range(n_batch_y):
jstart, jstop = j * batch_size, min((j + 1) * batch_size, n_y)
y_batch = y[jstart:jstop]
if isinstance(preprocess_fn, Callable): # type: ignore
y_batch = preprocess_fn(y_batch)
k_ijs.append(kernel(x_batch, y_batch))
k_is.append(tf.concat(k_ijs, axis=1))
k_mat = tf.concat(k_is, axis=0)
return k_mat
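# Example usage (illustrative sketch; assumes GaussianRBF from
# alibi_detect.utils.tensorflow): useful when the full [Nx, Ny] kernel matrix
# is too large to evaluate in a single call.
#
# from alibi_detect.utils.tensorflow import GaussianRBF
# x, y = tf.random.normal((200, 5)), tf.random.normal((150, 5))
# k_mat = batch_compute_kernel_matrix(x, y, GaussianRBF(sigma=tf.constant(1.)),
#                                     batch_size=64)  # [200, 150]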
def mmd2_from_kernel_matrix(kernel_mat: tf.Tensor, m: int, permute: bool = False,
zero_diag: bool = True) -> tf.Tensor:
"""
Compute maximum mean discrepancy (MMD^2) between 2 samples x and y from the
full kernel matrix between the samples.
Parameters
----------
kernel_mat
Kernel matrix between samples x and y.
m
Number of instances in y.
permute
        Whether to randomly permute the rows and columns of the kernel matrix. Used for permutation tests.
zero_diag
Whether to zero out the diagonal of the kernel matrix.
Returns
-------
MMD^2 between the samples from the kernel matrix.
"""
n = kernel_mat.shape[0] - m
if zero_diag:
kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat))
if permute:
idx = np.random.permutation(kernel_mat.shape[0])
kernel_mat = tf.gather(tf.gather(kernel_mat, indices=idx, axis=0), indices=idx, axis=1)
k_xx, k_yy, k_xy = kernel_mat[:-m, :-m], kernel_mat[-m:, -m:], kernel_mat[-m:, :-m]
c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
mmd2 = c_xx * tf.reduce_sum(k_xx) + c_yy * tf.reduce_sum(k_yy) - 2. * tf.reduce_mean(k_xy)
return mmd2
def mmd2(x: tf.Tensor, y: tf.Tensor, kernel: Callable) -> float:
"""
Compute MMD^2 between 2 samples.
Parameters
----------
x
Batch of instances of shape [Nx, features].
y
Batch of instances of shape [Ny, features].
kernel
Kernel function.
Returns
-------
MMD^2 between the samples x and y.
"""
n, m = x.shape[0], y.shape[0]
c_xx, c_yy = 1 / (n * (n - 1)), 1 / (m * (m - 1))
k_xx, k_yy, k_xy = kernel(x, x), kernel(y, y), kernel(x, y)
return (c_xx * (tf.reduce_sum(k_xx) - tf.linalg.trace(k_xx)) +
c_yy * (tf.reduce_sum(k_yy) - tf.linalg.trace(k_yy)) - 2. * tf.reduce_mean(k_xy))
def relative_euclidean_distance(x: tf.Tensor, y: tf.Tensor, eps: float = 1e-12, axis: int = -1) -> tf.Tensor:
"""
Relative Euclidean distance.
Parameters
----------
x
Tensor used in distance computation.
y
Tensor used in distance computation.
eps
Epsilon added to denominator for numerical stability.
axis
Axis used to compute distance.
Returns
-------
Tensor with relative Euclidean distance across specified axis.
"""
denom = tf.concat([tf.reshape(tf.norm(x, ord=2, axis=axis), (-1, 1)),
tf.reshape(tf.norm(y, ord=2, axis=axis), (-1, 1))], axis=1)
dist = tf.norm(x - y, ord=2, axis=axis) / (tf.reduce_min(denom, axis=axis) + eps)
return dist
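# Example usage (illustrative sketch):
#
# x, y = tf.random.normal((10, 4)), tf.random.normal((10, 4))
# d = relative_euclidean_distance(x, y)  # shape [10]; symmetric in x and y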
def permed_lsdds(
k_all_c: tf.Tensor,
x_perms: List[tf.Tensor],
y_perms: List[tf.Tensor],
H: tf.Tensor,
H_lam_inv: Optional[tf.Tensor] = None,
lam_rd_max: float = 0.2,
return_unpermed: bool = False,
) -> Union[Tuple[tf.Tensor, tf.Tensor], Tuple[tf.Tensor, tf.Tensor, tf.Tensor]]:
"""
Compute LSDD estimates from kernel matrix across various ref and test window samples
Parameters
----------
k_all_c
Kernel matrix of similarities between all samples and the kernel centers.
x_perms
List of B reference window index vectors
y_perms
List of B test window index vectors
H
Special (scaled) kernel matrix of similarities between kernel centers
H_lam_inv
        Function of H corresponding to a particular regularization parameter lambda.
        See Eqn 11 of Bu et al. (2017).
    lam_rd_max
        The maximum relative difference between two estimates of LSDD that the regularization parameter
        lambda is allowed to cause. Defaults to 0.2. Only relevant if H_lam_inv is not supplied.
    return_unpermed
        Whether or not to return the value corresponding to the unpermuted order defined by k_all_c.
Returns
-------
Vector of B LSDD estimates for each permutation, H_lam_inv which may have been inferred, and optionally \
the unpermed LSDD estimate.
"""
# Compute (for each bootstrap) the average distance to each kernel center (Eqn 7)
k_xc_perms = tf.stack([tf.gather(k_all_c, x_inds) for x_inds in x_perms], axis=0)
k_yc_perms = tf.stack([tf.gather(k_all_c, y_inds) for y_inds in y_perms], axis=0)
h_perms = tf.reduce_mean(k_xc_perms, axis=1) - tf.reduce_mean(k_yc_perms, axis=1)
if H_lam_inv is None:
# We perform the initialisation for multiple candidate lambda values and pick the largest
        # one for which the relative difference (RD) between two estimates of LSDD is below lam_rd_max.
# See Appendix A
candidate_lambdas = [1/(4**i) for i in range(10)] # TODO: More principled selection
H_plus_lams = tf.stack([H+tf.eye(H.shape[0], dtype=H.dtype)*can_lam for can_lam in candidate_lambdas], axis=0)
H_plus_lam_invs = tf.transpose(tf.linalg.inv(H_plus_lams), [1, 2, 0]) # lambdas last
omegas = tf.einsum('jkl,bk->bjl', H_plus_lam_invs, h_perms) # (Eqn 8)
h_omegas = tf.einsum('bj,bjl->bl', h_perms, omegas)
omega_H_omegas = tf.einsum('bkl,bkl->bl', tf.einsum('bjl,jk->bkl', omegas, H), omegas)
rds = tf.reduce_mean(1 - (omega_H_omegas/h_omegas), axis=0)
less_than_rd_inds = tf.where(rds < lam_rd_max)
if len(less_than_rd_inds) == 0:
repeats = k_all_c.shape[0] - np.unique(k_all_c, axis=0).shape[0]
if repeats > 0:
msg = "Too many repeat instances for LSDD-based detection. \
Try using MMD-based detection instead"
else:
msg = "Unknown error. Try using MMD-based detection instead"
raise ValueError(msg)
lambda_index = int(less_than_rd_inds[0])
lam = candidate_lambdas[lambda_index]
logger.info(f"Using lambda value of {lam:.2g} with RD of {float(rds[lambda_index]):.2g}")
H_plus_lam_inv = tf.linalg.inv(H+lam*tf.eye(H.shape[0], dtype=H.dtype))
H_lam_inv = 2*H_plus_lam_inv - (tf.transpose(H_plus_lam_inv, [1, 0]) @ H @ H_plus_lam_inv) # (blw Eqn 11)
# Now to compute an LSDD estimate for each permutation
lsdd_perms = tf.reduce_sum(
h_perms * tf.transpose(H_lam_inv @ tf.transpose(h_perms, [1, 0]), [1, 0]), axis=1
) # (Eqn 11)
if return_unpermed:
n_x = x_perms[0].shape[0]
h = tf.reduce_mean(k_all_c[:n_x], axis=0) - tf.reduce_mean(k_all_c[n_x:], axis=0)
lsdd_unpermed = tf.reduce_sum(h[None, :] * tf.transpose(H_lam_inv @ h[:, None], [1, 0]))
return lsdd_perms, H_lam_inv, lsdd_unpermed
else:
return lsdd_perms, H_lam_inv
| 9,181 | 36.024194 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_data_tf.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.tensorflow.data import TFDataset
# test on numpy array and list
n, f = 100, 5
shape = (n, f)
xtype = [list, np.ndarray]
shuffle = [True, False]
batch_size = [2, 10]
tests_ds = list(product(xtype, batch_size, shuffle))
n_tests_ds = len(tests_ds)
@pytest.fixture
def ds_params(request):
return tests_ds[request.param]
@pytest.mark.parametrize('ds_params', list(range(n_tests_ds)), indirect=True)
def test_tfdataset(ds_params):
xtype, batch_size, shuffle = ds_params
x = np.random.randn(*shape)
y = np.random.randn(*(n,))
if xtype == list:
x = list(x)
ds = TFDataset(x, y, batch_size=batch_size, shuffle=shuffle)
for step, data in enumerate(ds):
pass
if xtype == list:
assert len(data[0]) == batch_size and data[0][0].shape == (f,)
else:
assert data[0].shape == (batch_size, f)
assert data[1].shape == (batch_size,)
assert step == len(ds) - 1
if not shuffle:
assert (data[0][-1] == x[-1 - (n % batch_size)]).all()
| 1,094 | 27.076923 | 77 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_prediction_tf.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Tuple, Union
from alibi_detect.utils.tensorflow import predict_batch
n, n_features, n_classes, latent_dim = 100, 10, 5, 2
x = np.zeros((n, n_features), dtype=np.float32)
class MyModel(tf.keras.Model):
def __init__(self, multi_out: bool = False):
super(MyModel, self).__init__()
self.dense = Dense(n_classes, activation='softmax')
self.multi_out = multi_out
def call(self, x: np.ndarray) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]:
out = self.dense(x)
if not self.multi_out:
return out
else:
return out, out
AutoEncoder = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(latent_dim),
Dense(n_features)
]
)
def id_fn(x: Union[np.ndarray, tf.Tensor, list]) -> Union[np.ndarray, tf.Tensor]:
if isinstance(x, list):
return np.concatenate(x, axis=0)
else:
return x
# model, batch size, dtype, preprocessing function, list as input
tests_predict = [
(MyModel(multi_out=False), 2, np.float32, None, False),
(MyModel(multi_out=False), int(1e10), np.float32, None, False),
(MyModel(multi_out=False), int(1e10), tf.float32, None, False),
(MyModel(multi_out=True), int(1e10), tf.float32, None, False),
(MyModel(multi_out=False), int(1e10), np.float32, id_fn, False),
(AutoEncoder, 2, np.float32, None, False),
(AutoEncoder, int(1e10), np.float32, None, False),
(AutoEncoder, int(1e10), tf.float32, None, False),
(id_fn, 2, np.float32, None, False),
(id_fn, 2, tf.float32, None, False),
(id_fn, 2, np.float32, id_fn, True),
]
n_tests = len(tests_predict)
@pytest.fixture
def predict_batch_params(request):
return tests_predict[request.param]
@pytest.mark.parametrize('predict_batch_params', list(range(n_tests)), indirect=True)
def test_predict_batch(predict_batch_params):
model, batch_size, dtype, preprocess_fn, to_list = predict_batch_params
x_batch = [x] if to_list else x
preds = predict_batch(x_batch, model, batch_size=batch_size, preprocess_fn=preprocess_fn, dtype=dtype)
if isinstance(preds, tuple):
preds = preds[0]
assert preds.dtype == dtype
    if isinstance(model, tf.keras.Sequential) or (hasattr(model, '__name__') and model.__name__ == 'id_fn'):
assert preds.shape == x.shape
elif isinstance(model, tf.keras.Model):
assert preds.shape == (n, n_classes)
| 2,557 | 32.657895 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_misc_tf.py
|
from itertools import product
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
import numpy as np
from alibi_detect.utils.tensorflow import zero_diag, quantile, subset_matrix
from alibi_detect.utils.tensorflow.misc import clone_model
def test_zero_diag():
ones = tf.ones((10, 10))
ones_zd = zero_diag(ones)
assert ones_zd.shape == (10, 10)
assert float(tf.linalg.trace(ones_zd)) == 0
assert float(tf.reduce_sum(ones_zd)) == 90
type = [6, 7, 8]
sorted = [True, False]
tests_quantile = list(product(type, sorted))
n_tests_quantile = len(tests_quantile)
@pytest.fixture
def quantile_params(request):
return tests_quantile[request.param]
@pytest.mark.parametrize('quantile_params', list(range(n_tests_quantile)), indirect=True)
def test_quantile(quantile_params):
type, sorted = quantile_params
sample = (0.5+tf.range(1e6))/1e6
if not sorted:
sample = tf.random.shuffle(sample)
np.testing.assert_almost_equal(quantile(sample, 0.001, type=type, sorted=sorted), 0.001, decimal=6)
np.testing.assert_almost_equal(quantile(sample, 0.999, type=type, sorted=sorted), 0.999, decimal=6)
assert quantile(tf.ones((100,)), 0.42, type=type, sorted=sorted) == 1
with pytest.raises(ValueError):
quantile(tf.ones((10,)), 0.999, type=type, sorted=sorted)
with pytest.raises(ValueError):
quantile(tf.ones((100, 100)), 0.5, type=type, sorted=sorted)
def test_subset_matrix():
mat = tf.range(5)[None, :] * tf.range(5)[:, None]
inds_0 = [2, 3]
inds_1 = [2, 1, 4]
sub_mat = subset_matrix(mat, tf.constant(inds_0), tf.constant(inds_1))
assert sub_mat.shape == (2, 3)
for i, ind_0 in enumerate(inds_0):
for j, ind_1 in enumerate(inds_1):
assert sub_mat[i, j] == ind_0 * ind_1
with pytest.raises(ValueError):
subset_matrix(tf.ones((10, 10, 10)), inds_0, inds_1)
with pytest.raises(ValueError):
subset_matrix(tf.ones((10,)), inds_0, inds_1)
n_in, n_out = 10, 5
# sequential model
model_seq = tf.keras.Sequential([InputLayer(n_in, ), Dense(n_out)])
# functional model
inputs = Input(n_in, )
outputs = Dense(n_out)(inputs)
model_func = tf.keras.Model(inputs=inputs, outputs=outputs)
# subclassed model
class Model(tf.keras.Model):
def __init__(self):
super().__init__()
self.dense = Dense(5)
def call(self, x):
return self.dense(x)
@classmethod
def from_config(cls, config):
return cls(**config)
model_sub = Model()
def test_clone_model():
model_seq_clone = clone_model(model_seq)
assert not (model_seq_clone.weights[0] == model_seq.weights[0]).numpy().any()
model_func_clone = clone_model(model_func)
assert not (model_func_clone.weights[0] == model_func.weights[0]).numpy().any()
model_sub_clone = clone_model(model_sub)
_ = model_sub(tf.zeros((1, 10)))
_ = model_sub_clone(tf.zeros((1, 10)))
assert not (model_sub_clone.weights[0] == model_sub.weights[0]).numpy().any()
| 3,046 | 29.168317 | 103 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_kernels_tf.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from alibi_detect.utils.tensorflow import GaussianRBF, DeepKernel
sigma = [None, np.array([1.]), np.array([1., 2.])]
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
trainable = [True, False]
tests_gk = list(product(sigma, n_features, n_instances, trainable))
n_tests_gk = len(tests_gk)
@pytest.fixture
def gaussian_kernel_params(request):
return tests_gk[request.param]
@pytest.mark.parametrize('gaussian_kernel_params', list(range(n_tests_gk)), indirect=True)
def test_gaussian_kernel(gaussian_kernel_params):
sigma, n_features, n_instances, trainable = gaussian_kernel_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
kernel = GaussianRBF(sigma=sigma, trainable=trainable)
infer_sigma = True if sigma is None else False
if trainable and infer_sigma:
with pytest.raises(Exception):
kernel(x, y, infer_sigma=infer_sigma)
else:
k_xy = kernel(x, y, infer_sigma=infer_sigma).numpy()
k_xx = kernel(x, x, infer_sigma=infer_sigma).numpy()
assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
np.testing.assert_almost_equal(k_xx.trace(), xshape[0], decimal=4)
assert (k_xx > 0.).all() and (k_xy > 0.).all()
class MyKernel(tf.keras.Model): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.dense = Dense(20)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
kernel_a = [GaussianRBF(trainable=True), MyKernel]
kernel_b = [GaussianRBF(trainable=True), MyKernel, None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)
@pytest.fixture
def deep_kernel_params(request):
return tests_dk[request.param]
@pytest.mark.parametrize('deep_kernel_params', list(range(n_tests_dk)), indirect=True)
def test_deep_kernel(deep_kernel_params):
n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
proj = tf.keras.Sequential([Input(shape=(n_features,)), Dense(n_features)])
kernel_a = kernel_a(n_features) if kernel_a == MyKernel else kernel_a
kernel_b = kernel_b(n_features) if kernel_b == MyKernel else kernel_b
kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
k_xy = kernel(x, y).numpy()
k_yx = kernel(y, x).numpy()
k_xx = kernel(x, x).numpy()
assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
assert (np.diag(k_xx) > 0.).all()
np.testing.assert_almost_equal(k_xy, np.transpose(k_yx), decimal=5)
| 3,271 | 38.421687 | 92 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/utils/tensorflow/tests/test_distance_tf.py
|
import numpy as np
from itertools import product
import pytest
import tensorflow as tf
from alibi_detect.utils.tensorflow import GaussianRBF, mmd2, mmd2_from_kernel_matrix, permed_lsdds
from alibi_detect.utils.tensorflow import relative_euclidean_distance, squared_pairwise_distance
from alibi_detect.utils.tensorflow import batch_compute_kernel_matrix
n_features = [2, 5]
n_instances = [(100, 100), (100, 75)]
tests_pairwise = list(product(n_features, n_instances))
n_tests_pairwise = len(tests_pairwise)
@pytest.fixture
def pairwise_params(request):
return tests_pairwise[request.param]
@pytest.mark.parametrize('pairwise_params', list(range(n_tests_pairwise)), indirect=True)
def test_pairwise(pairwise_params):
n_features, n_instances = pairwise_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
dist_xx = squared_pairwise_distance(x, x).numpy()
dist_xy = squared_pairwise_distance(x, y).numpy()
assert dist_xx.shape == (xshape[0], xshape[0])
assert dist_xy.shape == n_instances
np.testing.assert_almost_equal(dist_xx.trace(), 0., decimal=5)
n_features = [2, 5]
n_instances = [(20, 20), (20, 15)]
batch_size = [1, 5]
tests_bckm = list(product(n_features, n_instances, batch_size))
n_tests_bckm = len(tests_bckm)
@pytest.fixture
def bckm_params(request):
return tests_bckm[request.param]
@pytest.mark.parametrize('bckm_params', list(range(n_tests_bckm)), indirect=True)
def test_bckm(bckm_params):
n_features, n_instances, batch_size = bckm_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
kernel = GaussianRBF(sigma=tf.constant(1.))
kernel_mat = kernel(x, y).numpy()
bc_kernel_mat = batch_compute_kernel_matrix(x, y, kernel, batch_size=batch_size).numpy()
np.testing.assert_almost_equal(kernel_mat, bc_kernel_mat, decimal=6)
tests_mmd = tests_pairwise
n_tests_mmd = n_tests_pairwise
@pytest.fixture
def mmd_params(request):
return tests_mmd[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests_mmd)), indirect=True)
def test_mmd(mmd_params):
n_features, n_instances = mmd_params
xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
np.random.seed(0)
x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
mmd_xx = mmd2(x, x, kernel=GaussianRBF(sigma=tf.ones(1)))
mmd_xy = mmd2(x, y, kernel=GaussianRBF(sigma=tf.ones(1)))
assert mmd_xy > mmd_xx
n = [10, 100]
m = [10, 100]
permute = [True, False]
zero_diag = [True, False]
tests_mmd_from_kernel_matrix = list(product(n, m, permute, zero_diag))
n_tests_mmd_from_kernel_matrix = len(tests_mmd_from_kernel_matrix)
@pytest.fixture
def mmd_from_kernel_matrix_params(request):
return tests_mmd_from_kernel_matrix[request.param]
@pytest.mark.parametrize('mmd_from_kernel_matrix_params',
list(range(n_tests_mmd_from_kernel_matrix)), indirect=True)
def test_mmd_from_kernel_matrix(mmd_from_kernel_matrix_params):
n, m, permute, zero_diag = mmd_from_kernel_matrix_params
n_tot = n + m
shape = (n_tot, n_tot)
kernel_mat = np.random.uniform(0, 1, size=shape)
kernel_mat_2 = kernel_mat.copy()
kernel_mat_2[-m:, :-m] = 1.
kernel_mat_2[:-m, -m:] = 1.
kernel_mat = tf.convert_to_tensor(kernel_mat)
kernel_mat_2 = tf.convert_to_tensor(kernel_mat_2)
if not zero_diag:
kernel_mat -= tf.linalg.diag(tf.linalg.diag_part(kernel_mat))
kernel_mat_2 -= tf.linalg.diag(tf.linalg.diag_part(kernel_mat_2))
mmd = mmd2_from_kernel_matrix(kernel_mat, m, permute=permute, zero_diag=zero_diag)
mmd_2 = mmd2_from_kernel_matrix(kernel_mat_2, m, permute=permute, zero_diag=zero_diag)
if not permute:
assert mmd_2.numpy() < mmd.numpy()
def test_relative_euclidean_distance():
x = tf.convert_to_tensor(np.random.rand(5, 3))
y = tf.convert_to_tensor(np.random.rand(5, 3))
assert (relative_euclidean_distance(x, y).numpy() == relative_euclidean_distance(y, x).numpy()).all()
assert (relative_euclidean_distance(x, x).numpy() == relative_euclidean_distance(y, y).numpy()).all()
assert (relative_euclidean_distance(x, y).numpy() >= 0.).all()
n = [10]
m = [10]
d = [3]
B = [20]
n_kcs = [5]
tests_permed_lsdds = list(product(n, m, d, B, n_kcs))
n_tests_permed_lsdds = len(tests_permed_lsdds)
@pytest.fixture
def permed_lsdds_params(request):
return tests_permed_lsdds[request.param]
@pytest.mark.parametrize('permed_lsdds_params',
list(range(n_tests_permed_lsdds)), indirect=True)
def test_permed_lsdds(permed_lsdds_params):
n, m, d, B, n_kcs = permed_lsdds_params
kcs = tf.random.normal((n_kcs, d))
x_ref = tf.random.normal((n, d))
x_cur = 10 + 0.2*tf.random.normal((m, d))
x_full = tf.concat([x_ref, x_cur], axis=0)
sigma = tf.constant((1.,))
k_all_c = GaussianRBF(sigma)(x_full, kcs)
H = GaussianRBF(np.sqrt(2.)*sigma)(kcs, kcs)
perms = [tf.random.shuffle(tf.range(n+m)) for _ in range(B)]
x_perms = [perm[:n] for perm in perms]
y_perms = [perm[n:] for perm in perms]
lsdd_perms, H_lam_inv, lsdd_unpermed = permed_lsdds(
k_all_c, x_perms, y_perms, H, return_unpermed=True
)
assert int(tf.reduce_sum(tf.cast(lsdd_perms > lsdd_unpermed, float))) == 0
assert H_lam_inv.shape == (n_kcs, n_kcs)
| 5,789 | 34.304878 | 105 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/ad/model_distillation.py
|
import logging
from typing import Callable, Dict, Tuple, Union, cast
import numpy as np
import tensorflow as tf
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
adversarial_prediction_dict)
from alibi_detect.models.tensorflow.losses import loss_distillation
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
from tensorflow.keras.losses import categorical_crossentropy, kld
logger = logging.getLogger(__name__)
class ModelDistillation(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
distilled_model: tf.keras.Model = None,
model: tf.keras.Model = None,
loss_type: str = 'kld',
temperature: float = 1.,
data_type: str = None
) -> None:
"""
        Model distillation based concept drift and adversarial instance detector.
Parameters
----------
threshold
Threshold used for score to determine adversarial instances.
distilled_model
            A tf.keras model to distill `model` into (the student model).
model
A trained tf.keras classification model.
loss_type
Loss for distillation. Supported: 'kld', 'xent'
temperature
Temperature used for model prediction scaling.
Temperature <1 sharpens the prediction probability distribution.
data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.model = model
for layer in self.model.layers: # freeze model layers
layer.trainable = False
if isinstance(distilled_model, tf.keras.Model):
self.distilled_model = distilled_model
else:
            raise TypeError('No valid format detected for `distilled_model` (tf.keras.Model).')
self.loss_type = loss_type
self.temperature = temperature
# set metadata
self.meta['detector_type'] = 'adversarial'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = loss_distillation,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 128,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
preprocess_fn: Callable = None
) -> None:
"""
Train ModelDistillation detector.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
preprocess_fn
Preprocessing function applied to each training batch.
"""
# train arguments
args = [self.distilled_model, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {
'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks,
'preprocess_fn': preprocess_fn,
'loss_fn_kwargs': {
'model': self.model,
'loss_type': self.loss_type,
'temperature': self.temperature
}
}
# train
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 99.,
margin: float = 0.,
batch_size: int = int(1e10)
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
adversarial in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the adversarial score.
margin
            Margin added to the inferred threshold. Useful when X contains no adversarial instances
            but adversarial scores are expected to be significantly higher.
batch_size
Batch size used when computing scores.
"""
# compute adversarial scores
adv_score = self.score(X, batch_size=batch_size)
# update threshold
self.threshold = np.percentile(adv_score, threshold_perc) + margin
def score(self, X: np.ndarray, batch_size: int = int(1e10), return_predictions: bool = False) \
-> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
"""
Compute adversarial scores.
Parameters
----------
X
Batch of instances to analyze.
batch_size
Batch size used when computing scores.
return_predictions
Whether to return the predictions of the classifier on the original and reconstructed instances.
Returns
-------
        Array with adversarial scores for each instance in the batch. If `return_predictions`
        equals True, the (temperature-scaled) model predictions and the distilled-model
        predictions are returned as well.
"""
# model predictions
y = predict_batch(X, self.model, batch_size=batch_size)
y_distilled = predict_batch(X, self.distilled_model, batch_size=batch_size)
y = cast(np.ndarray, y) # help mypy out
y_distilled = cast(np.ndarray, y_distilled) # help mypy out
# scale predictions
if self.temperature != 1.:
y = y ** (1 / self.temperature)
y = (y / tf.reshape(tf.reduce_sum(y, axis=-1), (-1, 1))).numpy()
if self.loss_type == 'kld':
score = kld(y, y_distilled).numpy()
elif self.loss_type == 'xent':
score = categorical_crossentropy(y, y_distilled).numpy()
else:
raise NotImplementedError
if return_predictions:
return score, y, y_distilled
else:
return score
def predict(self, X: np.ndarray, batch_size: int = int(1e10), return_instance_score: bool = True) \
            -> Dict[str, dict]:
"""
Predict whether instances are adversarial instances or not.
Parameters
----------
X
Batch of instances.
batch_size
Batch size used when computing scores.
return_instance_score
Whether to return instance level adversarial scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
- ``'data'`` contains the adversarial predictions and instance level adversarial scores.
"""
score = self.score(X, batch_size=batch_size)
# values above threshold are adversarial
pred = (score > self.threshold).astype(int) # type: ignore
# populate output dict
ad = adversarial_prediction_dict()
ad['meta'] = self.meta
ad['data']['is_adversarial'] = pred
if return_instance_score:
ad['data']['instance_score'] = score
return ad
| 7,963 | 34.238938 | 111 |
py
|
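A minimal usage sketch of the ModelDistillation detector above. The classifier, distilled (student) model and data are hypothetical stand-ins introduced here for illustration; they do not appear in the source.

import numpy as np
import tensorflow as tf
from alibi_detect.ad import ModelDistillation
X = np.random.rand(100, 4).astype(np.float32)  # illustrative data, 4 features
clf = tf.keras.Sequential([tf.keras.layers.Dense(3, activation='softmax', input_shape=(4,))])
student = tf.keras.Sequential([tf.keras.layers.Dense(3, activation='softmax', input_shape=(4,))])
ad = ModelDistillation(model=clf, distilled_model=student, loss_type='kld', temperature=0.5)
# temperature < 1 sharpens clf's probabilities before the KLD-based score is computed
ad.fit(X, epochs=5, verbose=False)         # train the student against the frozen classifier
ad.infer_threshold(X, threshold_perc=95.)  # threshold = 95th percentile of in-sample scores
preds = ad.predict(X)                      # preds['data']['is_adversarial'] flags score > threshold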
alibi-detect
|
alibi-detect-master/alibi_detect/ad/adversarialae.py
|
import logging
from typing import Callable, Dict, List, Tuple, Union, cast
import numpy as np
import tensorflow as tf
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
adversarial_correction_dict,
adversarial_prediction_dict)
from alibi_detect.models.tensorflow.autoencoder import AE
from alibi_detect.models.tensorflow.losses import loss_adv_ae
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.losses import kld
from tensorflow.keras.models import Model
logger = logging.getLogger(__name__)
class DenseHidden(tf.keras.Model):
def __init__(self, model: tf.keras.Model, hidden_layer: int, output_dim: int, hidden_dim: int = None) -> None:
"""
Dense layer that extracts the feature map of a hidden layer in a model and computes
output probabilities over that layer.
Parameters
----------
model
tf.keras classification model.
hidden_layer
Hidden layer from model where feature map is extracted from.
output_dim
Output dimension for softmax layer.
hidden_dim
Dimension of optional additional dense layer.
"""
super(DenseHidden, self).__init__()
self.partial_model = Model(inputs=model.inputs, outputs=model.layers[hidden_layer].output)
for layer in self.partial_model.layers: # freeze model layers
layer.trainable = False
self.hidden_dim = hidden_dim
if hidden_dim is not None:
self.dense_layer = Dense(hidden_dim, activation=tf.nn.relu)
self.output_layer = Dense(output_dim, activation=tf.nn.softmax)
def call(self, x: tf.Tensor) -> tf.Tensor:
x = self.partial_model(x)
x = Flatten()(x)
if self.hidden_dim is not None:
x = self.dense_layer(x)
return self.output_layer(x)
class AdversarialAE(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
ae: tf.keras.Model = None,
model: tf.keras.Model = None,
encoder_net: tf.keras.Model = None,
decoder_net: tf.keras.Model = None,
model_hl: List[tf.keras.Model] = None,
hidden_layer_kld: dict = None,
w_model_hl: list = None,
temperature: float = 1.,
data_type: str = None
) -> None:
"""
Autoencoder (AE) based adversarial detector.
Parameters
----------
threshold
            Threshold on the adversarial score above which instances are flagged as adversarial.
ae
A trained tf.keras autoencoder model if available.
model
A trained tf.keras classification model.
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
model_hl
List with tf.keras models for the hidden layer K-L divergence computation.
hidden_layer_kld
Dictionary with as keys the hidden layer(s) of the model which are extracted and used
during training of the AE, and as values the output dimension for the hidden layer.
w_model_hl
Weights assigned to the loss of each model in model_hl.
temperature
Temperature used for model prediction scaling.
Temperature <1 sharpens the prediction probability distribution.
data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.model = model
for layer in self.model.layers: # freeze model layers
layer.trainable = False
# check if model can be loaded, otherwise initialize AE model
if isinstance(ae, tf.keras.Model):
self.ae = ae
elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
self.ae = AE(encoder_net, decoder_net) # define AE model
else:
raise TypeError('No valid format detected for `ae` (tf.keras.Model) '
'or `encoder_net` and `decoder_net` (tf.keras.Sequential).')
# intermediate feature map outputs for KLD and loss weights
self.hidden_layer_kld = hidden_layer_kld
if isinstance(model_hl, list):
self.model_hl = model_hl
elif isinstance(hidden_layer_kld, dict):
self.model_hl = []
for hidden_layer, output_dim in hidden_layer_kld.items():
self.model_hl.append(DenseHidden(self.model, hidden_layer, output_dim))
else:
self.model_hl = None
self.w_model_hl = w_model_hl
if self.w_model_hl is None and isinstance(self.model_hl, list):
self.w_model_hl = list(np.ones(len(self.model_hl)))
self.temperature = temperature
# set metadata
self.meta['detector_type'] = 'adversarial'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = loss_adv_ae,
w_model: float = 1.,
w_recon: float = 0.,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 128,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
preprocess_fn: Callable = None
) -> None:
"""
Train Adversarial AE model.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
w_model
Weight on model prediction loss term.
w_recon
Weight on MSE reconstruction error loss term.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
preprocess_fn
Preprocessing function applied to each training batch.
"""
# train arguments
args = [self.ae, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {
'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks,
'preprocess_fn': preprocess_fn,
'loss_fn_kwargs': {
'model': self.model,
'model_hl': self.model_hl,
'w_model': w_model,
'w_recon': w_recon,
'w_model_hl': self.w_model_hl,
'temperature': self.temperature
}
}
# train
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 99.,
margin: float = 0.,
batch_size: int = int(1e10)
) -> None:
"""
        Infer the threshold from a sample of the dataset: the threshold is set at the score value
        below which `threshold_perc` percent of the instances are considered normal.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the adversarial score.
margin
            Margin added to the inferred threshold. Useful when X contains no adversarial instances
            but adversarial scores are expected to be significantly higher.
batch_size
Batch size used when computing scores.
"""
# compute adversarial scores
adv_score = self.score(X, batch_size=batch_size)
# update threshold
self.threshold = np.percentile(adv_score, threshold_perc) + margin
def score(self, X: np.ndarray, batch_size: int = int(1e10), return_predictions: bool = False) \
-> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
"""
Compute adversarial scores.
Parameters
----------
X
Batch of instances to analyze.
batch_size
Batch size used when computing scores.
return_predictions
Whether to return the predictions of the classifier on the original and reconstructed instances.
Returns
-------
        Array with adversarial scores for each instance in the batch. If `return_predictions`
        equals True, the (temperature-scaled) model predictions on the original and reconstructed
        instances are returned as well.
"""
# reconstructed instances
X_recon = predict_batch(X, self.ae, batch_size=batch_size)
# model predictions
y = predict_batch(X, self.model, batch_size=batch_size)
y_recon = predict_batch(X_recon, self.model, batch_size=batch_size)
y = cast(np.ndarray, y) # help mypy out
y_recon = cast(np.ndarray, y_recon) # help mypy out
# scale predictions
if self.temperature != 1.:
y = y ** (1 / self.temperature)
y = (y / tf.reshape(tf.reduce_sum(y, axis=-1), (-1, 1))).numpy()
adv_score = kld(y, y_recon).numpy()
# hidden layer predictions
if isinstance(self.model_hl, list):
for m, w in zip(self.model_hl, self.w_model_hl):
h = predict_batch(X, m, batch_size=batch_size)
h_recon = predict_batch(X_recon, m, batch_size=batch_size)
adv_score += w * kld(h, h_recon).numpy()
if return_predictions:
return adv_score, y, y_recon
else:
return adv_score
def predict(self, X: np.ndarray, batch_size: int = int(1e10), return_instance_score: bool = True) \
            -> Dict[str, dict]:
"""
Predict whether instances are adversarial instances or not.
Parameters
----------
X
Batch of instances.
batch_size
Batch size used when computing scores.
return_instance_score
Whether to return instance level adversarial scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
        - ``'meta'`` has the detector's metadata.
- ``'data'`` contains the adversarial predictions and instance level adversarial scores.
"""
adv_score = self.score(X, batch_size=batch_size)
# values above threshold are adversarial
adv_pred = (adv_score > self.threshold).astype(int) # type: ignore
# populate output dict
ad = adversarial_prediction_dict()
ad['meta'] = self.meta
ad['data']['is_adversarial'] = adv_pred
if return_instance_score:
ad['data']['instance_score'] = adv_score
return ad
def correct(self, X: np.ndarray, batch_size: int = int(1e10),
return_instance_score: bool = True, return_all_predictions: bool = True) \
            -> Dict[str, dict]:
"""
Correct adversarial instances if the adversarial score is above the threshold.
Parameters
----------
X
Batch of instances.
batch_size
Batch size used when computing scores.
return_instance_score
Whether to return instance level adversarial scores.
return_all_predictions
Whether to return the predictions on the original and the reconstructed data.
Returns
-------
        Dict with the corrected predictions and, for each instance, whether it was flagged as adversarial.
"""
adv_score, y, y_recon = self.score(X, batch_size=batch_size, return_predictions=True)
# values above threshold are adversarial
adv_pred = (adv_score > self.threshold).astype(int)
idx_adv = np.where(adv_pred == 1)[0]
# correct predictions on adversarial instances
y = y.argmax(axis=-1)
y_recon = y_recon.argmax(axis=-1)
y_correct = y.copy()
y_correct[idx_adv] = y_recon[idx_adv]
# populate output dict
ad = adversarial_correction_dict()
ad['meta'] = self.meta
ad['data']['is_adversarial'] = adv_pred
if return_instance_score:
ad['data']['instance_score'] = adv_score
ad['data']['corrected'] = y_correct
if return_all_predictions:
ad['data']['no_defense'] = y
ad['data']['defense'] = y_recon
return ad
| 13,401 | 36.858757 | 114 |
py
|
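A matching usage sketch for the AE-based detector above, including the correction step. The classifier, encoder/decoder sizes and data are again illustrative assumptions, not values from the source.

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.ad import AdversarialAE
X = np.random.rand(100, 4).astype(np.float32)  # illustrative data, 4 features
clf = tf.keras.Sequential([Dense(3, activation='softmax', input_shape=(4,))])
encoder_net = tf.keras.Sequential([InputLayer(input_shape=(4,)), Dense(2)])
decoder_net = tf.keras.Sequential([InputLayer(input_shape=(2,)), Dense(4, activation='sigmoid')])
ad = AdversarialAE(model=clf, encoder_net=encoder_net, decoder_net=decoder_net)
ad.fit(X, epochs=5, verbose=False)         # train the AE against the KLD-based adversarial loss
ad.infer_threshold(X, threshold_perc=95.)
out = ad.correct(X)                        # out['data']['corrected'] substitutes predictions on the
                                           # AE reconstruction for instances scoring above threshold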
alibi-detect
|
alibi-detect-master/alibi_detect/ad/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
AdversarialAE = import_optional('alibi_detect.ad.adversarialae', names=['AdversarialAE'])
ModelDistillation = import_optional('alibi_detect.ad.model_distillation', names=['ModelDistillation'])
__all__ = [
"AdversarialAE",
"ModelDistillation"
]
| 329 | 32 | 102 |
py
|
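A note on the lazy imports above: import_optional keeps alibi_detect.ad importable even when the optional TensorFlow backend is absent. The sketch below states the assumed contract (not verified against the implementation): the import itself never fails, and a missing dependency only surfaces once the detector is actually used.

from alibi_detect.ad import AdversarialAE  # succeeds whether or not tensorflow is installed
# AdversarialAE(...)                       # a missing optional dependency would only raise here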
alibi-detect
|
alibi-detect-master/alibi_detect/ad/tests/test_admd.py
|
from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from alibi_detect.ad import ModelDistillation
from alibi_detect.version import __version__
threshold = [None, 5.]
loss_type = ['kld', 'xent']
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, loss_type, threshold_perc, return_instance_score))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
y = to_categorical(y)
input_dim = X.shape[1]
latent_dim = 2
# define and train model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(X, y, batch_size=150, epochs=10)
@pytest.fixture
def adv_md_params(request):
return tests[request.param]
@pytest.mark.parametrize('adv_md_params', list(range(n_tests)), indirect=True)
def test_adv_md(adv_md_params):
# ModelDistillation parameters
threshold, loss_type, threshold_perc, return_instance_score = adv_md_params
    # define distilled (student) model
    layers = [tf.keras.layers.InputLayer(input_shape=(input_dim,)),
              tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)]
distilled_model = tf.keras.Sequential(layers)
# init ModelDistillation detector
admd = ModelDistillation(
threshold=threshold,
model=model,
distilled_model=distilled_model,
loss_type=loss_type
)
assert admd.threshold == threshold
assert admd.meta == {'name': 'ModelDistillation', 'detector_type': 'adversarial', 'data_type': None,
'online': False, 'version': __version__}
for layer in admd.model.layers:
assert not layer.trainable
    # fit ModelDistillation, infer threshold and compute scores
admd.fit(X, epochs=5, verbose=False)
admd.infer_threshold(X, threshold_perc=threshold_perc)
iscore = admd.score(X)
perc_score = 100 * (iscore < admd.threshold).astype(int).sum() / iscore.shape[0]
assert threshold_perc + 1 > perc_score > threshold_perc - 1
# make and check predictions
ad_preds = admd.predict(X, return_instance_score=return_instance_score)
assert ad_preds['meta'] == admd.meta
if return_instance_score:
assert ad_preds['data']['is_adversarial'].sum() == (ad_preds['data']['instance_score']
> admd.threshold).astype(int).sum()
else:
assert ad_preds['data']['instance_score'] is None
| 2,717 | 33.405063 | 104 |
py
|
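The perc_score assertion in the test above encodes the contract of infer_threshold: the threshold is set at the threshold_perc-th percentile of the in-sample scores, so roughly that fraction of scores must fall below it. A standalone numeric sketch with synthetic scores (illustrative only):

import numpy as np
scores = np.random.rand(1000)                   # stand-in for detector scores on normal data
threshold = np.percentile(scores, 90.)          # what infer_threshold computes (margin=0)
perc_below = 100 * (scores < threshold).mean()  # ~90 by construction
assert 89. < perc_below < 91.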
alibi-detect
|
alibi-detect-master/alibi_detect/ad/tests/test_adae.py
|
from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.utils import to_categorical
from alibi_detect.ad import AdversarialAE
from alibi_detect.version import __version__
threshold = [None, 5.]
w_model = [1., .5]
w_recon = [0., 1e-5]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, w_model, w_recon, threshold_perc, return_instance_score))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
y = to_categorical(y)
input_dim = X.shape[1]
latent_dim = 2
# define and train model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(X, y, batch_size=150, epochs=10)
@pytest.fixture
def adv_ae_params(request):
return tests[request.param]
@pytest.mark.parametrize('adv_ae_params', list(range(n_tests)), indirect=True)
def test_adv_ae(adv_ae_params):
# AdversarialAE parameters
threshold, w_model, w_recon, threshold_perc, return_instance_score = adv_ae_params
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
    # init AdversarialAE
advae = AdversarialAE(
threshold=threshold,
model=model,
encoder_net=encoder_net,
decoder_net=decoder_net
)
assert advae.threshold == threshold
assert advae.meta == {'name': 'AdversarialAE', 'detector_type': 'adversarial', 'data_type': None,
'online': False, 'version': __version__}
for layer in advae.model.layers:
assert not layer.trainable
    # fit AdversarialAE, infer threshold and compute scores
advae.fit(X, w_model=w_model, w_recon=w_recon, epochs=5, verbose=False)
advae.infer_threshold(X, threshold_perc=threshold_perc)
iscore = advae.score(X)
perc_score = 100 * (iscore < advae.threshold).astype(int).sum() / iscore.shape[0]
assert threshold_perc + 1 > perc_score > threshold_perc - 1
# make and check predictions
ad_preds = advae.predict(X, return_instance_score=return_instance_score)
assert ad_preds['meta'] == advae.meta
if return_instance_score:
assert ad_preds['data']['is_adversarial'].sum() == (ad_preds['data']['instance_score']
> advae.threshold).astype(int).sum()
else:
assert ad_preds['data']['instance_score'] is None
| 3,044 | 31.741935 | 101 |
py
|