repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/helper/__init__.py
| 0 | 0 | 0 |
py
|
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/data/cnews_loader.py
|
# coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.keras as kr
if sys.version_info[0] > 2:
is_py3 = True
else:
reload(sys)
sys.setdefaultencoding("utf-8")
is_py3 = False
def native_word(word, encoding='utf-8'):
"""如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码"""
if not is_py3:
return word.encode(encoding)
else:
return word
def native_content(content):
if not is_py3:
return content.decode('utf-8')
else:
return content
def open_file(filename, mode='r'):
"""
常用文件操作,可在python2和python3间切换.
mode: 'r' or 'w' for read or write
"""
if is_py3:
return open(filename, mode, encoding='utf-8', errors='ignore')
else:
return open(filename, mode)
def read_file(filename):
"""读取文件数据"""
contents, labels = [], []
with open_file(filename) as f:
for line in f:
try:
label, content = line.strip().split('\t')
if content:
contents.append(list(native_content(content)))
labels.append(native_content(label))
except:
pass
return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
"""根据训练集构建词汇表,存储"""
data_train, _ = read_file(train_dir)
all_data = []
for content in data_train:
all_data.extend(content)
counter = Counter(all_data)
count_pairs = counter.most_common(vocab_size - 1)
words, _ = list(zip(*count_pairs))
# 添加一个 <PAD> 来将所有文本pad为同一长度
words = ['<PAD>'] + list(words)
open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
"""读取词汇表"""
# words = open_file(vocab_dir).read().strip().split('\n')
with open_file(vocab_dir) as fp:
# 如果是py2 则每个值都转化为unicode
words = [native_content(_.strip()) for _ in fp.readlines()]
word_to_id = dict(zip(words, range(len(words))))
return words, word_to_id
def read_category():
"""读取分类目录,固定"""
categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
categories = [native_content(x) for x in categories]
cat_to_id = dict(zip(categories, range(len(categories))))
return categories, cat_to_id
def to_words(content, words):
"""将id表示的内容转换为文字"""
return ''.join(words[x] for x in content)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
"""将文件转换为id表示"""
contents, labels = read_file(filename)
data_id, label_id = [], []
for i in range(len(contents)):
data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])
label_id.append(cat_to_id[labels[i]])
# 使用keras提供的pad_sequences来将文本pad为固定长度
x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id)) # 将标签转换为one-hot表示
return x_pad, y_pad
def batch_iter(x, y, batch_size=64):
"""生成批次数据"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
indices = np.random.permutation(np.arange(data_len))
x_shuffle = x[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
| 3,386 | 25.255814 | 92 |
py
|
text-classification-cnn-rnn
|
text-classification-cnn-rnn-master/data/__init__.py
| 0 | 0 | 0 |
py
|
|
graph-ismorphism
|
graph-ismorphism-master/assets/time-complexity.py
|
"""
alganal.py
Description:
A utility program to plot algorithmic time complexity of a function.
Author: Mahesh Venkitachalam
Website: electronut.in
"""
from matplotlib import pyplot
import numpy as np
import timeit
from functools import partial
import random
def fconst(N):
"""
O(1) function
"""
x = 1
def flinear(N):
"""
O(n) function
"""
x = [i for i in range(N)]
def fsquare(N):
"""
O(n^2) function
"""
for i in range(N):
for j in range(N):
x = i*j
def fshuffle(N):
# O(N)
random.shuffle(list(range(N)))
def fsort(N):
x = list(range(N))
random.shuffle(x)
x.sort()
def plotTC(fn, nMin, nMax, nInc, nTests):
"""
Run timer and plot time complexity
"""
x = []
y = []
for i in range(nMin, nMax, nInc):
N = i
testNTimer = timeit.Timer(partial(fn, N))
t = testNTimer.timeit(number=nTests)
x.append(i)
y.append(t)
p1 = pyplot.plot(x, y, 'o')
#pyplot.legend([p1,], [fn.__name__, ])
# main() function
def main():
print('Analyzing Algorithms...')
plotTC(fconst, 10, 1000, 10, 10)
plotTC(flinear, 10, 1000, 10, 10)
plotTC(fsquare, 10, 1000, 10, 10)
#plotTC(fshuffle, 10, 1000, 1000, 10)
plotTC(fsort, 10, 1000, 10, 10)
# enable this in case you want to set y axis limits
#pyplot.ylim((-0.1, 0.5))
# show plot
pyplot.show()
# call main
if __name__ == '__main__':
main()
| 1,480 | 17.283951 | 68 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/main.py
|
#! /usr/bin/python2.7
"""
Main logic
"""
from tester import Tester
if __name__ == "__main__":
"""
Command line handling
"""
tester = Tester()
# tester.test_1() # Search
# tester.test_2() # Search
# tester.test_3() # Search
# tester.test_4() # Search
# tester.test_5() # Run Sat Solver
# tester.test_6() # Convert systems to graphs and time them
# tester.test_7() # Run Traces
# tester.test_8() # Recursive search
# tester.test_9() # Search
# tester.test_10() # Search
# tester.test_11() # Search
# tester.test_12() # Search
# tester.test_13() # Search
# tester.test_14() # Search
# tester.test_15() # Search
# tester.test_16() # Search
# tester.test_17() # Default
# tester.test_18() # Run graphs and plot
# tester.test_19() # Run graphs
tester.test_20() # Plot graphs
| 869 | 25.363636 | 64 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/gi.py
|
#! /usr/bin/python2.7
"""
Logic for generating and timing graphs using Traces package
"""
import datetime
import re
import signal
from handlers.exceptionhandler import signal_handler, TimeoutError
from handlers.filehandler import FileHandler
from handlers.processhandler import ProcessHandler
import networkx as nx
import numpy as np
import math
import pprint
class Gi(object):
def run_all_graphs(self, **kwargs):
"""
Run all graphs on dreadnaut
:param kwargs:
:return:
"""
graphs = self.load_graphs()
results = self.run_graphs(graphs, **kwargs)
return results
def run_graphs(self, graphs, **kwargs):
"""
Run a set of graphs
:param graphs:
:param kwargs:
:return:
"""
results = {}
for graph in graphs:
# Checking if it is a dimacs graph
# If so, run all other programs
# Else run traces
if "dimacs" in graph:
results[graph + "_bliss"] = self.run_graph(graphs[graph], graph, "bliss", **kwargs)
results[graph + "_conauto"] = self.run_graph(graphs[graph], graph, "conauto", **kwargs)
else:
results[graph + "_nauty"] = self.run_graph(graphs[graph], graph, "nauty", **kwargs)
results[graph] = self.run_graph(graphs[graph], graph, "traces", **kwargs)
return results
def run_graph(self, graphs, graph, program, **kwargs):
"""
Run instances in a graph
:param graph:
:param kwargs:
:return:
"""
fh = FileHandler()
ph = ProcessHandler()
run = ph.run_command("ls -v ./../assets/graphs_run/")
save = kwargs.get("save", False)
outstanding = kwargs.get("outstanding", False)
graph_name = self.get_filename(graph, program)
# Load previous results
if outstanding and graph_name in run:
graph_results = fh.read_from_file("./../assets/graphs_run/{0}".format(graph_name))
else:
graph_results = []
# Gather results
for graph_instance in graphs:
print "{0} {1}".format(graph_instance, datetime.datetime.now())
# Skip existing graph
if outstanding and graph_name in run:
if any(d['name'] == graph_instance for d in graph_results):
continue
kwargs["program"] = program
result = self.run_graph_instance(graph, graph_instance, **kwargs)
graph_results.append(result)
# Save
if save:
print "Saving..."
fh.write_to_file("./../assets/graphs_run/" + graph_name, graph_results)
return graph_results
def run_graph_instance(self, graph, graph_instance, **kwargs):
"""
Run a specific instance on dreadnaut
dreadnaut At -a V=0 -m <"[path]" x q
:param graph:
:param graph_instance:
:param kwargs:
:return:
"""
# Init
ph = ProcessHandler()
path = "./../assets/graphs/" + graph + "/" + graph_instance
is_dimacs = self.is_dimacs(graph)
if is_dimacs:
process = False
nodes = ph.run_command("head '" + path + "'")[0].split(" ")[2]
program = kwargs.get("program", "bliss")
else:
nodes = re.search("(n=?)=\d+", ' '.join(ph.run_command("head '" + path + "'"))).group(0)[2:]
process = ph.open_process("dreadnaut")
program = kwargs.get("program", "traces")
command = self.get_command(program, path)
# Set timeout (seconds)
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(kwargs.get("timeout", kwargs.get("timeout", 0)))
# Gather results
try:
time, d_time = self.run_program(process, command, program)
except TimeoutError:
print "Timed out: Took too long to validate"
time = -1
d_time = -1
if not is_dimacs:
process.kill()
process.terminate()
finally:
signal.alarm(0)
return {
"name": graph_instance,
"nodes": nodes,
"time": time,
"d_time": d_time
}
def run_bliss(self, command):
ph = ProcessHandler()
time, out = ph.run_command_timed(command)
out_time_string = out[len(out) - 1]
time_string_split = out_time_string.split("\t")
d_time = time_string_split[1].split(" ")[0]
return time, d_time
def run_conauto(self, command):
ph = ProcessHandler()
time, out = ph.run_command_timed(command)
out_time_string = out[len(out) - 1]
time_string_split = out_time_string.split(" ")
d_time = time_string_split[1]
return time, d_time
def run_nauty_traces(self, process, command):
ph = ProcessHandler()
time, (stdout, stderr) = ph.run_function_timed(process.communicate,
(command,),
return_args=True)
split = re.search('(time=?) = \d+.\d+\d+', stdout)
if split:
d_time = split.group(0)[7:]
else:
time = -1
d_time = -1
return time, d_time
def load_graphs(self):
"""
Load graph instances from package
:return:
"""
ph = ProcessHandler()
graphs = {}
for graph in ph.run_command('ls -v ./../assets/graphs/'):
graphs[graph] = []
for graph_instance in ph.run_command('ls -v ./../assets/graphs/' + graph):
graphs[graph].append(graph_instance)
return graphs
def load_results(self):
"""
Load results from tests
:return:
"""
ph = ProcessHandler()
fh = FileHandler()
results = {}
run = ph.run_command("ls -v ./../assets/graphs_run/")
for graph_run in run:
results[graph_run[:-4]] = fh.read_from_file("./../assets/graphs_run/" + graph_run)
return results
def generate_random_graphs(self):
"""
Extend random graphs by generating larger versions provided in the Traces package.
Random graphs are defined with edge probability 1/2, 1/10 and sqrt(n)
:return:
"""
ph = ProcessHandler()
instances = [5, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 100, 200, 300,
400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 4000, 5000,
10000, 20000, 30000]
probabilities = ["1/2", "1/10", "sqrt"]
names = ["ran2_custom", "ran10_custom", "ransq_custom"]
for p, n in zip(probabilities, names):
print p
for i in instances:
dest = "./../assets/graphs_custom/{0}/{1}".format(n, i)
print dest
p = "1/" + str(int(math.ceil(math.sqrt(float(i)))))
print "./../assets/nauty26r7/genrang -P{0} {1} 1 {2}.g6".format(p, i, dest)
ph.run_command("./../assets/nauty26r7/genrang -P{0} {1} 1 {2}.g6".format(p, i, dest))
ph.run_command("./../assets/nauty26r7/showg -d {0}.g6 {1}.dre".format(dest, dest))
# ph.run_command("rm ./../assets/{0}.g6".format(dest))
def convert_graph_to_traces(self, n, m, G, type, dir):
"""
Convert a given networkx graph into dreadnaut format
:param n:
:param m:
:param G:
:return:
"""
if type == "B":
nodes = (2 * n) + (4 * m)
variables = (2 * n)
else:
nodes = n + m
variables = n
# Init
fh = FileHandler()
path = dir + "{0}_{1}_{2}.dre".format(n, m, type)
path_temp = "./../assets/temp/temp.adjlist"
# Convert to Adjlist and store temporarily
nx.write_adjlist(G, path_temp)
# Read data and convert
data = fh.read_from_file_simple(path_temp)
output = ["n={0} $=0 g".format(nodes)]
for i in range(3, variables + 3):
datum = data[i].split()
datum[0] = "{0}:".format(datum[0])
output.append(" ".join(datum))
output[-1] = "{}.".format(output[-1])
output.append("$$")
# Save as .dre
fh.write_to_file_simple(path, output)
# Convert to dot if necessary
# ./nauty26r7/dretodot construction/3.dre construction/3.dot
return path
def graph_has_automorphisms(self, path):
"""
Check if a given graph has any automorphisms
:param path:
:return: True of if has, False otherwise
"""
ph = ProcessHandler()
process = ph.open_process("dreadnaut")
time, (stdout, stderr) = ph.run_function_timed(process.communicate,
('At -a V=0 -m <"' + path + '" x q',),
return_args=True)
return False if stdout.split(";")[2].split()[0] == '0' else True
def is_graph_slower(self, path_a, path_b):
"""
Determine if a graph at a saved location runs slower than another
:param path_a:
:param path_b:
:return:
"""
ph = ProcessHandler()
process = ph.open_process("dreadnaut")
time_a, (stdout, stderr) = ph.run_function_timed(process.communicate,
('At -a V=0 -m <"' + path_a + '" x q',),
return_args=True)
time_b, (stdout, stderr) = ph.run_function_timed(process.communicate,
('At -a V=0 -m <"' + path_b + '" x q',),
return_args=True)
return time_a > time_b
def get_command(self, program, path):
if program == "bliss":
return "./../assets/programs/bliss-0.73/bliss " + path
elif program == "traces":
return 'At -a V=0 -m <"' + path + '" x q'
elif program == "nauty":
return 'As -a V=0 -m <"' + path + '" x q'
elif program == "conauto":
return "./../assets/programs/conauto-2.03/bin/conauto-2.03 -aut -dv " + path
def is_dimacs(self, graph):
return "dimacs" in graph
def run_program(self, process, command, program):
time = -1
d_time = -1
if program == "bliss":
time, d_time = self.run_bliss(command)
elif program == "traces" or program == "nauty":
time, d_time = self.run_nauty_traces(process, command)
elif program == "conauto":
time, d_time = self.run_conauto(command)
return time, d_time
def get_filename(self, graph, program):
if program != "traces":
return graph + "_" + program + ".txt"
return graph + ".txt"
def pp_helper(self, data):
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(data)
exit()
| 11,302 | 33.045181 | 104 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/tester.py
|
#! /usr/bin/python2.7
"""
Main logic
"""
from sat import Sat
from gi import Gi
from main_logic import Main
from handlers.filehandler import FileHandler
from handlers.plothandler import PlotHandler
from handlers.processhandler import ProcessHandler
# Tests
class Tester(object):
def test_1(self):
"""
Search 1: 1 - 100
:return:
"""
main = Main()
main.generate_systems(n=4,
min_m=4,
max_n=100,
max_m=100,
step=1,
save_results=False,
save_systems=True,
limit=False,
max_tries=30)
def test_2(self):
"""
Search 2: 100 - 1000
+ 10
:return:
"""
main = Main()
main.generate_systems(n=100,
min_m=100,
max_n=1000,
max_m=1000,
step=10,
save_results=False,
save_systems=True,
limit=False,
max_tries=10)
def test_3(self):
"""
Search 3: 1000 - 10000
+ 100
:return:
"""
main = Main()
main.generate_systems(n=1000,
min_m=1000,
max_n=10000,
max_m=10000,
step=100,
save_results=True,
save_systems=True,
limit=10,
efficient_search=True,
max_tries=10)
def test_4(self):
"""
Search 4: 10000 - 50000
+ 1000
:return:
"""
main = Main()
main.generate_systems(n=10000,
min_m=10000,
max_n=50000,
max_m=50000,
step=1000,
save_results=False,
save_systems=True,
limit=5,
max_tries=5)
def test_5(self):
"""
Run sat solver through saved instances
:return:
"""
main = Main()
main.execute_sat_solver()
main.plot_sat_solver_results()
def test_6(self):
"""
Convert saved instances to graphs and run
:return:
"""
main = Main()
main.convert_systems_to_constructions(validate=True, delete=False)
main.execute_constructions()
# main.plot_constructions_results()
def test_7(self):
"""
Run saved .dre graphs through Traces and time results
:return:
"""
main = Main()
main.execute_graphs(outstanding=True,
timeout=10800,
save=True)
# main.plot_graphs_results(save=True)
def test_8(self):
"""
Use recursion to find instances for a given n and m rather than randomly searching
:return:
"""
sat = Sat()
x = sat.find_equations(5, 6)
for i in x:
print x.count(i), sat.is_system_uniquely_satisfiable(i, 5)
for j in x:
if i == j:
continue
def test_9(self):
"""
Construction search
Generate systems that also fulfil construction criteria
- k consistent
- No automorphisms
Slow
Systems are saved to construction_search
:return:
"""
main = Main()
main.generate_systems(n=4,
min_m=4,
max_n=100,
max_m=100,
step=10,
save_results=True,
save_systems=True,
gi=Gi())
def test_10(self):
"""
Threshold search 1: 1000 - 10000
:return:
"""
main = Main()
main.generate_systems(n=1000,
min_m=1000,
max_n=10000,
max_m=10000,
step=100,
save_results=True,
save_systems=True,
limit=10)
main.plot_generate_systems_results('./../assets/sat_run/0-n-10000_0-m-10000_step-100/results', aggregate=True)
def test_11(self):
"""
Construction search 2
Generate systems that also fulfil construction criteria
- k consistent
- No automorphisms
Slow
Systems are saved to construction_search
:return:
"""
main = Main()
main.generate_systems(n=1000,
min_m=1000,
max_n=10000,
max_m=20000,
step=1000,
save_results=True,
save_systems=True,
upper_bound=2,
lower_bound=1,
max_tries=30,
gi=Gi())
def test_12(self):
"""
Update strongly k directory
:return:
"""
main = Main()
main.generate_systems(n=10,
min_m=10,
max_n=100,
max_m=200,
step=10,
save_results=True,
save_systems=True,
upper_bound=3,
lower_bound=1,
max_tries=30,
update_strongly_k=True,
gi=Gi())
def test_13(self):
"""
Find strongly k
4 - 10
10 - 100
100 - 1000
1000 - 10000
:return:
"""
main = Main()
for i in range(0, 30):
main.generate_systems(n=10,
min_m=10,
max_n=100,
max_m=300,
step=10,
save_results=True,
save_systems=True,
upper_bound=3,
lower_bound=1,
max_tries=30,
update_strongly_k=True,
gi=Gi())
def test_14(self):
"""
Recursive search
Warning - may crash
:return:
"""
sat = Sat()
n = 10
m = 10
clauses = sat.find_clauses(n)
systems = sat.find_systems(clauses, [], n, m, 0, find_one=True)
return systems
def test_15(self):
"""
Demonstrating difficulty in finding n = m
:return:
"""
main = Main()
main.generate_systems(n=100,
min_m=100,
max_n=200,
max_m=200,
step=1,
save_results=True,
save_systems=True,
upper_bound=1,
lower_bound=1,
max_tries=10000,
update_strongly_k=True,
gi=Gi())
def test_16(self):
"""
Demonstrating difficulty in finding 2n = m
:return:
"""
main = Main()
main.generate_systems(n=10,
min_m=10,
max_n=100,
max_m=100,
step=10,
save_results=True,
save_systems=True,
upper_bound=1,
lower_bound=1,
max_tries=1000,
update_strongly_k=True,
gi=Gi())
def test_17(self):
fh = FileHandler()
data = fh.read_from_file("../assets/results/sat/0-n-10000_0-m-10000_step-100/results")
for d in data:
print "{0} {1} a".format(d[1], d[2])
i = [10,20,30,40,50,60,70,80,90,100, 110, 120]
gi = Gi()
for j in i:
a = gi.run_graph_instance("ga_vs_gb", "{0}_{1}_A.dre".format(j,j))
b = gi.run_graph_instance("ga_vs_gb", "{0}_{1}_B.dre".format(j,j))
print "{0} {1} a".format(j,a["time"],1)
print "{0} {1} b".format(j,b["time"],1)
def test_18(self):
main = Main()
main.execute_graphs(outstanding=True,
timeout=10800,
save=True)
main.plot_graphs_results(save=True)
def test_19(self):
main = Main()
main.execute_graphs(timeout=5400, save=True, useAll=True)
def test_20(self):
main = Main()
main.plot_graphs_results(save=True, time="d_time")
| 9,480 | 27.905488 | 118 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/main_logic.py
|
#! /usr/bin/python2.7
"""
Main logic
"""
from sat import Sat
from gi import Gi
from handlers.filehandler import FileHandler
from handlers.plothandler import PlotHandler
from handlers.processhandler import ProcessHandler
class Main(object):
def generate_systems(self, **kwargs):
"""
Generate instances
:param kwargs:
:return:
"""
sat = Sat()
results, systems = sat.generate_systems(**kwargs)
return results, systems
def plot_generate_systems_results(self, filename, **kwargs):
"""
Plot time taken to generate instances
:param filename:
:param kwargs:
:return:
"""
ph = PlotHandler()
fh = FileHandler()
data = fh.read_from_file(filename, **kwargs)
ph.plot_sat_results(data)
def execute_graphs(self, **kwargs):
"""
Run graphs through Traces
:param kwargs:
:return:
"""
gi = Gi()
# kwargs["graphs"] = {
# "tnn": gi.load_graphs()["tnn"]
# }
if kwargs.get("graphs"):
graphs = kwargs.get("graphs")
kwargs.pop('graphs', None)
gi.run_graphs(graphs, **kwargs)
else:
gi.run_all_graphs(**kwargs)
def plot_graphs_results(self, **kwargs):
"""
Plot time taken to run graphs through Traces
:param kwargs:
:return:
"""
ph = PlotHandler()
gi = Gi()
results = gi.load_results()
ph.plot_graphs_results(results, **kwargs)
ph.plot_graphs_comparison(results)
def execute_sat_solver(self):
"""
Time execution time of running instances through Sat Solver
:return:
"""
sat = Sat()
sat.run_solver()
def plot_sat_solver_results(self):
"""
Plot execution time of running instances through Sat Solver
:return:
"""
sat = Sat()
ph = PlotHandler()
results = sat.load_results()
ph.plot_k_consistency_check(results)
def convert_systems_to_constructions(self, **kwargs):
"""
Convert found systems into graphs and run them through Traces
:return:
"""
# Init
gi = Gi()
sat = Sat()
ph = ProcessHandler()
fh = FileHandler()
paths = ph.run_command("ls -v ./../assets/systems_to_convert/")
validate = kwargs.get("validate", False)
delete = kwargs.get("delete", False)
# Iterate systems
for path in paths:
print "Checking " + path
# Paths
graph_path = "./../assets/construction/" + path + "_A.dre"
system_path = "./../assets/systems_to_convert/" + path
# Extract n and m values
n, m = path.split("_")
n = int(n)
m = int(m)
# Load system
system = fh.read_from_file(system_path)
if validate:
# Check for k-local consistency
if not sat.is_k_consistent(n, m, system):
print "\t Not K consistent system. Removing and skipping."
if delete:
fh.delete_file(system_path)
continue
else:
print "\t K consistent system. Constructing A."
# Convert system into graphs and check for automorphisms
G = sat.convert_system_to_graph(n, m, system)
gi.convert_graph_to_traces(n, m, G, "A", "./../assets/construction/") # First construction
if not gi.graph_has_automorphisms(graph_path):
print "\t No Automorphisms. Constructing B."
G = sat.convert_system_to_construction(n, m, system)
gi.convert_graph_to_traces(n, m, G, "B", "./../assets/construction/") # Second construction
if delete:
fh.delete_file(graph_path)
else:
print "\t Automorphisms. Removing and skipping."
if delete:
fh.delete_file(graph_path) # Remove unwanted graph
fh.delete_file(system_path) # Remove unwanted system
else:
G = sat.convert_system_to_construction(n, m, system)
gi.convert_graph_to_traces(n, m, G, "B", "./../assets/construction/")
def execute_constructions(self):
"""
Run new constructions through Traces
:return:
"""
gi = Gi()
fh = FileHandler()
graphs = {
"con_all": gi.load_graphs()["con_all"]
}
results = gi.run_graphs(graphs, save=True, timeout=7200)
# Init
con_4_10 = []
con_10_100 = []
con_100_1000 = []
con_n = []
con_2n = []
con_3n = []
con_sml = []
found = []
# Extract packages
for result in results["con_all"]:
n = int(result["name"].split("_")[0])
m = int(result["name"].split("_")[1])
if n in range(4, 10, 1):
con_4_10.append(result)
if n in range(10, 100, 10):
con_10_100.append(result)
if n in range(100, 1000, 100):
con_100_1000.append(result)
if n == m:
con_n.append(result)
if 2 * n == m:
con_2n.append(result)
if 3 * n == m:
con_3n.append(result)
# Extract smallest n : m ratio
for i in results["con_all"]:
n = int(i["name"].split("_")[0])
m = int(i["name"].split("_")[1])
if n in found:
continue
for j in results["con_all"]:
n_1 = int(j["name"].split("_")[0])
m_1 = int(j["name"].split("_")[1])
if n == n_1 and m_1 <= m:
con_sml.append(j)
found.append(n)
# Produce packages
packages = {
"con_4_10": con_4_10,
"con_10_100": con_10_100,
"con_100_1000": con_100_1000,
"con_n": con_n,
"con_2n": con_2n,
"con_3n": con_3n,
"con_sml": con_sml
}
# Save packages
for package in packages:
fh.write_to_file("./../assets/graphs_run/{0}.txt".format(package), packages[package])
fh.makedir("./../assets/graphs/{0}".format(package))
for instance in packages[package]:
name = instance["name"]
fh.copy_file("./../assets/graphs/con_all/{0}".format(name),
"./../assets/graphs/{0}/{1}".format(package, name))
return results
def plot_constructions_results(self):
ph = PlotHandler()
gi = Gi()
r = gi.load_results()
results = {
"con_4_10": r["con_4_10"],
"con_10_100": r["con_10_100"],
"con_100_1000": r["con_100_1000"],
"con_n": r["con_n"],
"con_2n": r["con_2n"],
"con_3n": r["con_3n"],
"con_sml": r["con_sml"]
}
ph.plot_graphs_results(results)
ph.plot_construction_results(results)
| 7,368 | 30.762931 | 112 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/__init__.py
| 0 | 0 | 0 |
py
|
|
graph-ismorphism
|
graph-ismorphism-master/program/sat.py
|
#! /usr/bin/python2.7
"""
Logic for producing uniquely satisfiable instances using Cryptominisat package
"""
import random
from pycryptosat import Solver
from handlers.filehandler import FileHandler
from handlers.processhandler import ProcessHandler
import timeit
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import itertools
class Sat(object):
def generate_systems(self, **kwargs):
"""
Generate instances by searching through combinations of n and m
Save these results as files
:param kwargs:
:return: Results of time taken to search
"""
# Init
fh = FileHandler()
ph = ProcessHandler()
all_results = [['key', 'n', 'm', 'tries', 'vTime']]
all_systems = []
# Looping params
step = kwargs.get("step", 1)
max_tries = kwargs.get("max_tries", 30)
min_m = kwargs.get("min_m", 4)
n = kwargs.get("n", 4)
max_n = kwargs.get("max_n", 100)
max_m = kwargs.get("max_m", 100)
# Complex looping params
efficient_search = kwargs.get("limited_search", False)
limit = kwargs.get("limit", False)
upper_bound = kwargs.get("upper_bound", 4)
lower_bound = kwargs.get("lower_bound", 1)
# Additional params
save_results_dir = "./../assets/sat_run/{0}-n-{1}_{2}-m-{3}_step-{4}".format(n, max_n, min_m, max_m, step)
save_results = kwargs.get("save_results", False)
save_systems = kwargs.get("save_systems", False)
gi = kwargs.get("gi", False)
update_strongly_k = kwargs.get("update_strongly_k", False)
# Prep results folder
if save_results or save_systems or gi:
save_results_dir = fh.makedir(save_results_dir)
save_results_location = save_results_dir + "/results"
save_systems_location = save_results_dir + "/systems/"
save_constructions_location = save_results_dir + "/constructions/"
fh.makedir(save_systems_location)
fh.makedir(save_constructions_location)
# Loop n value
while n <= max_n:
# Init loop
tries = 0
found = 0
smallest_m_found = False
n_results = []
n_systems = []
if min_m < n:
# If m is smaller than n, then bring m up to speed
m = lower_bound * n
else:
m = min_m
# Loop m value
while m <= max_m:
# Handle Iterators
if max_tries == tries:
# Failed to find and tried too many times
print "Skipping: {0} {1}".format(n, m)
tries = 0
all_results.append([key, n, m, tries, -1, -1])
n_results.append([key, n, m, tries, -1, -1])
m += step
continue
elif m > (upper_bound * n) or (found and found == limit):
# Do not search for m > 4n or continue to next m if adequate systems are found
break
# Generate random system and record time taken to find
key = `n` + ':' + `m`
validation_start = timeit.default_timer()
generate_time, system = ph.run_function_timed(self.generate_rand_system, (n, m),
return_args=True)
# Validate system
if self.is_system_uniquely_satisfiable(system, n) \
and ((gi and self.is_system_eligble(n, m, system, gi, save_results_dir)) or not gi) \
and ((update_strongly_k and self.is_system_slower(n, m, system)) or not update_strongly_k):
# Found unique system
print "Found: {0} {1}".format(n, m)
# Record times
validation_time = timeit.default_timer() - validation_start
all_results.append([key, n, m, tries, validation_time, generate_time])
n_results.append([key, n, m, tries, validation_time, generate_time])
all_systems.append([key, n, m, system])
n_systems.append([key, n, m, system])
# Update iterators
tries = 0
found += 1
if efficient_search and not smallest_m_found:
# Update the lower bound
min_m = m - step
smallest_m_found = True
if update_strongly_k:
self.update_strongly_k(n, m, system)
else:
# Failed to find, try again
# print 'Couldnt find for {0} {1} Misses {2}'.format(n, m, tries)
tries += 1
m -= step
# Increment m
m += step
# Save search information
if save_results:
self.save_results(n_results, save_results_location)
if save_systems:
self.save_results_systems(n_systems, save_systems_location)
# Increment n
n += step
return all_results, all_systems
def generate_rand_system(self, n, m):
"""
Generates a random homogenous system
Try n times to pull a unique random set of three variables from the pool.
- I.e. don't pick the same clause twice.
:param n:
:param m:
:return: A 2d array of systems
"""
# Init
pool = range(1, n + 1)
system = []
tries = 3
i = 0
# Build an array of unique arrays as a system
while i < m:
clause = random.sample(pool, 3)
clause.sort()
if tries == 0:
return False
elif clause in system:
tries -= 1
else:
system.append(clause)
i += 1
return system
def generate_systems_fix_n(self):
"""
Generate a system forcing n to stay static and allow m to vary
Give up after n tries
:return: Null
"""
sat = Sat()
n = 50
max_m = 1000
tries = 10
for m in range(555, max_m):
system = sat.generate_rand_system(n, m)
if self.is_system_uniquely_satisfiable(system, n):
tries = 10
print 'Found: ' + `m`
else:
print 'Couldnt find for ' + `m`
tries -= 1
print tries
if tries == 0:
print 'Sequence of misses'
return
def generate_systems_fix_n_force(self):
"""
Generate a system forcing n to stay static and allowing m to vary
Don't give up, that is, keep trying until a system is found
:return: Null
"""
sat = Sat()
n = 50
m = 555
max_m = 1000
misses = 0
while m < max_m:
m += 1
system = sat.generate_rand_system(n, m)
if self.is_system_uniquely_satisfiable(system, n):
misses = 0
print "Found: ", m
# print eq
else:
misses += 1
m -= 1
print 'Couldnt find for ' + `m` + ' Misses ' + `misses`
def is_system_uniquely_satisfiable(self, system, n):
"""
Tests unique satisfiable by banning all zero solution
:param system:
:param n:
:return:
"""
if not system:
return False
# Prep solver
solver = Solver()
for clause in system:
solver.add_xor_clause(clause, False)
# Ban all zero
solver.add_clause(range(1, n + 1))
sat, sol = solver.solve()
# print "Found system is {0}".format(sat)
return not sat
def is_system_eligble(self, n, m, system, gi, location):
"""
Check if graph meets the construction criteria
- k consistent
- No automorphisms
:param n:
:param m:
:param gi:
:param system:
:return:
"""
# Init
ph = ProcessHandler()
fh = FileHandler()
graph_path = "{0}/constructions/".format(location)
system_path = "{0}/constructions/{1}_{2}".format(location, n, m)
construct_a_location = "{0}{1}_{2}_A.dre".format(graph_path, n, m)
# Save system temporarily
self.save_system(n, m, system, graph_path)
G = self.convert_system_to_graph(n, m, system)
gi.convert_graph_to_traces(n, m, G, "A", graph_path) # First construction
# Check for k-local consistency
if not self.is_k_consistent(n, m, system):
# print "Not K consistent"
fh.delete_file(system_path)
return False
elif not gi.graph_has_automorphisms(construct_a_location):
# print "No Automorphisms. Construct."
G = self.convert_system_to_construction(n, m, system)
gi.convert_graph_to_traces(n, m, G, "B", graph_path) # Second construction
fh.delete_file(system_path)
return True
else:
# print "Automorphisms. Remove."
fh.delete_file(construct_a_location) # Remove unwanted graph
fh.delete_file(system_path) # Remove unwanted system
return False
def is_system_slower(self, n, m, system):
"""
Determine if a given system is slower than the current slowest system stored in "systems_strongly_k"
:param n:
:param m:
:param system:
:return:
"""
fh = FileHandler()
path = "./../assets/systems_strongly_k/{0}_{1}".format(n, m)
temp_path_a = "./../assets/temp/temp_a"
temp_path_b = "./../assets/temp/temp_b"
system_old = fh.read_from_file(path)
if not system_old:
return True
fh.write_to_file_simple(temp_path_a, self.prepare_cryptominisat_system(n, m, system))
fh.write_to_file_simple(temp_path_b, self.prepare_cryptominisat_system(n, m, system_old))
diff_a = self.get_gauss_off_time(temp_path_a) - self.get_gauss_on_time(temp_path_a)
diff_b = self.get_gauss_off_time(temp_path_b) - self.get_gauss_on_time(temp_path_b)
if diff_a > diff_b:
print "Slower {0}".format(diff_a - diff_b)
pass
return diff_a > diff_b
def update_strongly_k(self, n, m, system):
"""
Update the folder of stored systems which are a catalogue of slow systems
:param n:
:param m:
:param system:
:return:
"""
ph = ProcessHandler()
fh = FileHandler()
path = "./../assets/systems_strongly_k/{0}_{1}".format(n, m)
fh.delete_file(path)
self.save_system(n, m, system, "./../assets/systems_strongly_k/")
def find_equations(self, n, m):
"""
Find unique systems by searching combinations, rather than picking at random.
That is, find systems of equations "systematically"
:param n:
:param m:
:return:
"""
clauses = self.find_clauses(n)
systems = self.find_systems(clauses, [], n, m, 0, find_one=False)
return systems
def find_clauses(self, n):
"""
Helper that finds all unique combinations of clauses
:param n:
:return: A 2d array of all combinations of
"""
pool = range(1, n + 1)
clauses = list(itertools.combinations(pool, 3))
return clauses
def find_systems(self, clauses, system, n, m, depth, **kwargs):
"""
Find all systems using recursive method
:param clauses:
:param system:
:param n:
:param m:
:param depth:
:return: A system of equations
"""
# Base Case
# If length of system = m, then we have long enough system
if len(system) == m:
for i in range(0, len(system) - 1):
if system[i] > system[i + 1]:
return False
# print system
sat = Sat()
if sat.is_system_uniquely_satisfiable(system, n):
return True
else:
return False
# Recursive Case
# Else, system is not long enough, we need to append to system
else:
systems = []
# For each clause not in the system, add to system
for clause in clauses:
# Remove this clause from the pool
tail = list(clauses)
tail.remove(clause)
# Add this clause to the current system and validate
system_temp = list(system)
system_temp.append(clause)
unique_system = self.find_systems(tail, system_temp, n, m, depth + 1)
# Check if it is a uniquely satisfiable instance or is it a return call
if isinstance(unique_system, bool) and unique_system:
systems.append(system_temp)
if kwargs.get("find_one", False):
return systems
elif unique_system:
systems = systems + unique_system
return systems
def run_solver(self, **kwargs):
"""
Run Traces through systems and record times
Looking for systems that are faster with gauss off => K-local consistent
:param kwargs:
:return: Null
"""
fh = FileHandler()
ph = ProcessHandler()
results = []
skip = kwargs.get("outstanding", False)
completed = []
if fh.read_from_file("./../assets/systems_run/run"):
for result in fh.read_from_file("./../assets/systems_run/run"):
completed.append(result[0])
for filename in ph.run_command('ls -v ./../assets/systems/'):
# Init
path = './../assets/systems/' + filename
system = fh.read_from_file(path)
split = filename.split("_")
n = split[0]
m = split[1]
# Skip completed systems
key = "{0}:{1}".format(n, m)
if skip and key in completed:
continue
print key
# Create cryptominisat system
time_off, time_on = self.get_gauss_times(n, m, system)
# Save
results.append([key, n, m, time_off, time_on, time_off - time_on])
fh.update_file("./../assets/systems_run/run", results)
def prepare_cryptominisat_system(self, n, m, system):
"""
Helper to translate a python system to a cryptominisat system
Save this translation into a file for processing
:param n:
:param m:
:param system:
:return:
"""
# init
input = [
'p cnf {0} {1}'.format(n, m)
]
# Grab clauses
for clause in system:
input.append("x{0} {1} -{2} 0".format(int(clause[0]), int(clause[1]), int(clause[2])))
# Ensures uniquely satisfiable
input.append(" ".join([str(i) for i in range(1, int(n) + 1)]) + " 0")
# Return
return input
def find_pool(self, clauses):
"""
Helper to find variables used
Redundant
:param clauses:
:return: A list of integers used in clauses
"""
variables = []
for clause in clauses:
for variable in clause:
if variable not in variables:
variables.append(variable)
return variables
def save_systems(self, systems, location):
"""
Save a set of systems of equations to a file
:param systems:
:return:
"""
for system in systems:
# n, m, system
self.save_system(system[1], system[2], system[3], location)
def save_system(self, n, m, system, location):
"""
Save a system of equations to file
:param n:
:param m:
:param system:
:param location:
:return:
"""
fh = FileHandler()
path = location + "{0}_{1}".format(n, m)
fh.write_to_file(path, system)
def save_results(self, results, location):
"""
Save a set of results
:param location:
:param results:
:return:
"""
fh = FileHandler()
ph = ProcessHandler()
print "Saving results..."
save_results_time = ph.run_function_timed(fh.update_file, (location, results))
print "Time taken: ", save_results_time
def save_results_systems(self, systems, location):
fh = FileHandler()
ph = ProcessHandler()
print "Saving systems..."
save_systems_time = ph.run_function_timed(self.save_systems, (systems, location))
print "Time taken: ", save_systems_time
def load_results(self):
"""
Load Sat Solver execution results
:return:
"""
fh = FileHandler()
results = fh.read_from_file("./../assets/systems_run/run")
return results
def convert_system_to_graph(self, n, m, system, **kwargs):
"""
Convert a system of equations to a Traces graph
First construction used in checking for Automorphisms
:param system:
:return:
"""
# Empty matrix of correct size
width = n + m
A = np.zeros((width, width))
# Insert edges
c = 0
for clause in system:
for i in range(0, 3):
A[clause[i] - 1][n + c] = 1
# Increment
c = c + 1
# Make Symmetric
A = np.maximum(A, A.transpose())
# Prepare Positioning
L = range(0, n)
R = range(n, n + m)
# Prepare Labels
labels = range(1, n + 1) + ["C" + str(i) for i in range(1, m + 1)]
labels_dict = {}
for i in range(0, n + m):
labels_dict[i] = labels[i]
# Construct Graph
G = nx.from_numpy_matrix(A)
if kwargs.get("plot", False):
pos = nx.spring_layout(G)
pos = dict()
pos.update((n, (i, 1)) for i, n in enumerate(R)) # put nodes from X at x=1
pos.update((n, (i, 2)) for i, n in enumerate(L)) # put nodes from Y at x=2
nx.draw(G, pos)
nx.draw_networkx_labels(G, pos, labels_dict)
plt.draw()
plt.show()
return G
def convert_system_to_construction(self, n, m, system, **kwargs):
"""
Convert a system of equations into a Traces graph
Second construction after checking for automorphisms
:param n:
:param m:
:param system:
:return:
"""
width = (2 * n) + (4 * m)
A = np.zeros((width, width))
# Insert edges
c = 0
for clause in system:
c_pos = (2 * n) + c
# 0 0 0
A[(clause[0] * 2) - 2][c_pos] = 1
A[(clause[1] * 2) - 2][c_pos] = 1
A[(clause[2] * 2) - 2][c_pos] = 1
# 0 1 1
A[(clause[0] * 2) - 2][c_pos + 1] = 1
A[(clause[1] * 2) - 1][c_pos + 1] = 1
A[(clause[2] * 2) - 1][c_pos + 1] = 1
# 1 0 1
A[(clause[0] * 2) - 1][c_pos + 2] = 1
A[(clause[1] * 2) - 2][c_pos + 2] = 1
A[(clause[2] * 2) - 1][c_pos + 2] = 1
# 1 1 0
A[(clause[0] * 2) - 1][c_pos + 3] = 1
A[(clause[1] * 2) - 1][c_pos + 3] = 1
A[(clause[2] * 2) - 2][c_pos + 3] = 1
# Increment
c = c + 4
# Make Symmetric
A = np.maximum(A, A.transpose())
# Prepare Positioning
L = range(0, 2 * n)
R = range(2 * n, width)
# Prepare Labels
labels_dict = {}
labels = []
for i in range(1, n + 1):
labels.append("{0}F".format(i))
labels.append("{0}T".format(i))
for i in range(1, m + 1):
labels.append("C{0}_1".format(i))
labels.append("C{0}_2".format(i))
labels.append("C{0}_3".format(i))
labels.append("C{0}_4".format(i))
for i in range(0, width):
labels_dict[i] = labels[i]
# Construct Graph
G = nx.from_numpy_matrix(A)
if kwargs.get("plot", False):
pos = nx.spring_layout(G)
pos = dict()
pos.update((n, (i, 1)) for i, n in enumerate(R)) # put nodes from X at x=1
pos.update((n, (i, 2)) for i, n in enumerate(L)) # put nodes from Y at x=2
nx.draw(G, pos)
nx.draw_networkx_labels(G, pos, labels_dict)
plt.draw()
plt.show()
return G
def is_k_consistent(self, n, m, system):
"""
Looking for systems that are faster with gauss off => K-local consistent
on vs off
faster on versus off
Looking for systems that are FASTER (take less time) with GAUSS ON than GAUSS OFF
:param n:
:param m:
:param system:
:return:
"""
ph = ProcessHandler()
fh = FileHandler()
# Get times
time_off, time_on = self.get_gauss_times(n, m, system)
# If Gauss On - Gauss Off > Threshold (sec)
# threshold = time_b - time_a > float(1)
# I.e. time_off - time_on > 0
threshold = time_on < time_off # No threshold determined
return threshold
def get_gauss_times(self, n, m, system):
"""
Retrieve the validation times (unique satisfiability) on a given system executed on a Sat solver
:param n:
:param m:
:param system:
:return:
"""
path = "./../assets/temp/temp_gauss_check"
fh = FileHandler()
# Create cryptominisat system
input = self.prepare_cryptominisat_system(n, m, system)
fh.write_to_file_simple(path, input)
# run gauss off
time_off = self.get_gauss_off_time(path)
# run gauss on
time_on = self.get_gauss_on_time(path)
return time_off, time_on
def get_gauss_off_time(self, path):
"""
Retrieve time taken to execute with gauss off
= Helper
:param path:
:return:
"""
ph = ProcessHandler()
cmd = "./../assets/sat/cryptominisat/build/cryptominisat5 --verb=0 {0}".format(path)
time, out = ph.run_function_timed(ph.run_command, (cmd,), return_args=True)
return time
def get_gauss_on_time(self, path):
"""
Retrieve time taken to execute with gauss on
- Helper
:param path:
:return:
"""
ph = ProcessHandler()
cmd = "./../assets/sat/cryptominisat/build_gauss/cryptominisat5 --verb=0 {0}".format(path)
time, out = ph.run_function_timed(ph.run_command, (cmd,), return_args=True)
return time
| 23,485 | 31.847552 | 115 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/tests/papercode.py
|
"""Writing logic from Prof. Dawars handout
"""
# Set of variables (including negations)
X = []
for i in range(-4, 5):
X.append(i)
# Set of clauses
phi = []
c1 = [1, 2, 3]
c2 = [-3, 4, 5]
c3 = [-2, 1, 3]
phi = [c1, c2]
# Satisfiable assignment
# -4, -3, -2, -1, 0, 1, 2, 3, 4
T = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# System over the two element field
S = []
s1 = [1, 1, 1, 0]
s2 = [0, 1, 1, 1]
s3 = [0, 1, 1, 1]
# Pebbles
k = 6
pebbles = [1, 2, 3, 4, 5, 6]
while True:
pi = random.choice(pebbles)
x = random.choice(random.choice(phi))
duplicator = random.choice([0, 1])
| 584 | 17.870968 | 42 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/tests/tester.py
|
from program.handlers.filehandler import FileHandler
from program.sat import Sat
fh = FileHandler()
sat = Sat()
system = fh.read_from_file("./../../assets/systems/2600_6900")
print sat.is_system_uniquely_satisfiable(system, 2600)
| 231 | 28 | 62 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/handlers/plothandler.py
|
#! /usr/bin/python2.7
"""
Logic that handles the plotting of data into graphs
"""
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
import matplotlib.pyplot as plt
import operator
class PlotHandler(object):
def plot_plot_2d(self, title, x, y, **kwargs):
"""
Plot a generic 2d line graph
:param title:
:param x:
:param y:
:param kwargs:
:return:
"""
plt.title(title)
plt.xlabel(kwargs.get("x_label", "Set X"))
plt.ylabel(kwargs.get("y_label", "Set Y"))
plt.scatter(x, y)
plt.plot(x, y)
if kwargs.get("timed_out_x", False):
plt.scatter(kwargs["timed_out_x"], kwargs["timed_out_y"], c='red')
plt.ylim(ymin=0)
plt.grid()
# Save / Show
if kwargs.get("save", False):
plt.savefig("./../assets/graphs_run/" + title)
if kwargs.get("hide", False):
pass
else:
plt.show()
plt.clf()
plt.close()
def plot_scatter_2d(self, title, x, y, z, **kwargs):
"""
Plot a generic 2d scatter graph
:param title:
:param x:
:param y:
:param z:
:param kwargs:
:return:
"""
plt.title(title)
plt.xlabel(kwargs.get("x_label", "Set X"))
plt.ylabel(kwargs.get("y_label", "Set Y"))
plt.scatter(x, y)
plt.ylim(ymin=0)
plt.grid()
# Save / Show
if kwargs.get("save", False):
plt.savefig("./../assets/graphs_run/" + title)
if kwargs.get("hide", False):
pass
else:
plt.show()
plt.clf()
plt.close()
def plot_heatmap_2d(self, title, x, y, z, **kwargs):
"""
Plot a generic 2d heatmap
:param title:
:param x:
:param y:
:param z:
:param kwargs:
:return:
"""
plt.title(title)
plt.xlabel(kwargs.get("x_label", "X"))
plt.ylabel(kwargs.get("y_label", "Y"))
plt.scatter(x, y, c=z, s=500)
clb = plt.colorbar()
clb.ax.set_title('Time(sec)')
plt.grid()
# Save / Show
if kwargs.get("save", False):
plt.savefig("./../assets/graphs_run/" + title)
if kwargs.get("hide", False):
pass
else:
plt.show()
plt.clf()
plt.close()
def plot_graph_3d(self, title, x, y, z, **kwargs):
"""
Plot a generic 3d scatter graph
:param title:
:param x:
:param y:
:param z:
:param kwargs:
:return:
"""
fig = plt.figure()
plt.title(title)
ax = fig.add_subplot(111, projection='3d')
ax.set_title(title)
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('N Values')
ax.set_ylabel('M Values')
ax.set_zlabel('Time (sec)')
plt.grid()
# Save / Show
if kwargs.get("save", False):
plt.savefig("./../assets/graphs_run/" + title)
if kwargs.get("hide", False):
pass
else:
plt.show()
plt.clf()
plt.close()
def plot_graphs_results(self, results, **kwargs):
"""
Handler for plotting results generated by Traces
:param results:
:param kwargs:
:return:
"""
kwargs["x_label"] = "nodes"
kwargs["y_label"] = "time(sec)"
for graph in results:
self.plot_graph(results, graph, **kwargs)
def plot_graph(self, results, graph, **kwargs):
"""
Plot a single graph
- Helper to plot gi results
:param results:
:param graph:
:param kwargs:
:return:
"""
x_axis = []
y_axis = []
timed_out_x = []
timed_out_y = []
time = kwargs.get("time", "time") # Can switch between d_time
# Sort nodes
for result in results[graph]:
result["nodes"] = int(result["nodes"])
result["d_time"] = float(result["d_time"])
results[graph].sort(key=operator.itemgetter("nodes"))
for result in results[graph]:
# Some code to deal with nodes with multiple entries
if result['nodes'] in x_axis:
pos = x_axis.index(result["nodes"])
# y_axis[pos] = "%.2f" % (float(y_axis[pos]) + float(result["time"]) * 0.5) # avg
v = y_axis[pos] if y_axis[pos] > result[time] else result[time]
y_axis[pos] = v
if result["nodes"] in timed_out_x:
pos = timed_out_x.index(result["nodes"])
timed_out_y[pos] = v
continue
# Deal with timeouts
if result[time] == -1:
print "timed out"
result[time] = y_axis[-1]
timed_out_x.append(result['nodes'])
timed_out_y.append(result[time])
x_axis.append(result['nodes'])
y_axis.append(result[time])
kwargs["timed_out_x"] = timed_out_x
kwargs["timed_out_y"] = timed_out_y
if kwargs.get("get", False):
return x_axis, y_axis, kwargs
else:
self.plot_plot_2d(graph, x_axis, y_axis, **kwargs)
def plot_graphs_comparison(self, results):
"""
A 2d plot for comparing graphs run on Traces
:param results:
:return:
"""
# Init
x_axis = range(10, 100, 10) + range(100, 1000, 100) + range(1000, 10000, 1000)
x_label = "nodes"
y_label = "time(sec)"
title = "Graph comparison"
plots = []
for graph in results:
x, y, kwargs = self.plot_graph(results, graph, get=True)
plot = {
"x": x,
"y": y
}
plots.append(plot)
# Plotting
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
# plt.xlim([1, 4000])
for plot in plots:
plt.plot(plot["x"], plot["y"])
plt.grid()
plt.show()
plt.clf()
plt.close()
def plot_construction_results(self, results):
"""
A 3d plot for constructions run on Traces
:param results:
:return:
"""
graph = "construction_custom"
x = []
y = []
z = []
for result in results[graph]:
n = int(result['name'].split("_")[0])
m = int(result['name'].split("_")[1])
t = float(result["time"])
x.append(n)
y.append(m)
z.append(t)
self.plot_graph_3d("Should work", x, y, z)
def plot_sat_results(self, data):
"""
Handler for plotting results from generating systems
:param data:
:return:
"""
x = []
y = []
z = []
for r in data:
if r[0] == 'key':
continue
elif r[4] > 0:
x.append(r[1])
y.append(r[2])
z.append(r[4])
title = 'Sat run 0-n-10000_0-m-10000_step-100'
self.plot_heatmap_2d(title,
x,
y,
z,
x_label="N Values",
y_label="M Values")
self.plot_graph_3d(title, x, y, z)
def plot_k_consistency_check(self, data):
"""
Handler for plotting results for testing systems in Sat Solver
:param data:
:return:
"""
# Time off - on (positive values are good)
x = []
y = []
z = []
for r in data:
x.append(int(r[1]))
y.append(int(r[2]))
z.append(float(r[3])) # off
# z.append(float(r[4])) # on
# z.append(float(r[5])) # off - on
title = 'Sat run 0-n-10000_0-m-10000_step-100'
self.plot_heatmap_2d(title,
x,
y,
z,
x_label="N Values",
y_label="M Values")
self.plot_graph_3d(title, x, y, z)
| 8,463 | 27.402685 | 97 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/handlers/processhandler.py
|
#! /usr/bin/python2.7
"""
Logic that handles running system processes
"""
import os
import subprocess
import timeit
class ProcessHandler(object):
def run_command(self, command):
"""
Run a command and return a list
Good for single lined commands
:param command:
:return:
"""
p = os.popen(command, "r")
out = []
while 1:
line = p.readline()
if not line:
break
out.append(line.rstrip())
return out
def open_process(self, command):
"""
Prepare a process
Useful for multiple inputs
:param command:
:return: Return a subprocess object
"""
process = subprocess.Popen([command], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process
def run_process(self, command, inputs):
"""
Execute a process
Useful for multiple inputs
:param command:
:param inputs:
:return:
"""
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outputs = []
for input in inputs:
print inputs
outputs.append(process.stdin.write(input))
print process.stdout
return outputs,
def run_function_timed(self, f, args, **kwargs):
"""
Time a process
Useful in running Sat Solver and Traces
:param f:
:param args:
:param kwargs:
:return:
"""
# print args
start = timeit.default_timer()
ret = f(*args)
time = timeit.default_timer() - start
if kwargs.get("return_args"):
return time, ret
return time
def run_command_timed(self, command):
start = timeit.default_timer()
out = self.run_command(command)
time = timeit.default_timer() - start
return time, out
| 2,079 | 25.329114 | 92 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/handlers/filehandler.py
|
#! /usr/bin/python2.7
"""
Logic that deals with file I/O
"""
import os
import os.path
import operator
import json
import datetime
from processhandler import ProcessHandler
class FileHandler(object):
def append_to_file(self, path, line):
"""
Add a string to the end of a file
:param path:
:param line:
:return:
"""
ph = ProcessHandler()
ph.run_command("echo '{}'".format(line) + " >> " + path)
def write_to_file_simple(self, path, data):
"""
Write to file using raw strings
- Print line by line using new line separators
:param path:
:param data:
:return:
"""
with open(path, 'w') as outfile:
for datum in data:
out = datum
if datum != data[-1]:
out = out + "\n"
outfile.write(out)
def write_to_file(self, path, data):
"""
Write an object to file using JSON
:param path:
:param data:
:return:
"""
with open(path, 'w') as outfile:
json.dump(data, outfile)
def read_from_file(self, path, **kwargs):
"""
Read from a file using JSON objects
:param path:
:param kwargs:
:return:
"""
if not os.path.isfile(path):
return False
# First read
with open(path, 'r') as outfile:
data = json.load(outfile)
# Merge multiple instances
if kwargs.get("aggregate"):
ph = ProcessHandler()
directory = path.rsplit('/', 1)[:-1][0]
filename = path.rsplit('/', 1)[-1]
count = ph.run_command("cd {0} && ls -d *{1}* | wc -l ".format(directory, filename))[0]
for i in range(1, int(count)):
temp_path = "{0}_{1}".format(path, str(i))
with open(temp_path, 'r') as outfile:
data_b = json.load(outfile)
data = self.merge_data(data, data_b)
return data
def read_from_file_simple(self, path):
"""
Read line by line from a file
:param path:
:return:
"""
with open(path) as f:
data = f.readlines()
data = [x.strip() for x in data]
return data
def update_file(self, path, data):
"""
Update a file using a JSON object and keys to update entries
:param path:
:param data:
:return:
"""
# Check if has space, otherwise make a new file
if not self.has_space(path):
ph = ProcessHandler()
directory = path.rsplit('/', 1)[:-1][0]
filename = path.rsplit('/', 1)[-1]
count = ph.run_command("cd {0} && ls -d *{1}* | wc -l ".format(directory, filename))[0]
count = str(int(count) - 1) if int(count) > 1 else count
data = self.update_file("{0}_{1}".format(path, count), data)
return data
# If the file exists, grab data and merge it with this data
elif os.path.isfile(path):
old_data = self.read_from_file(path)
data = self.merge_data(old_data, data)
# Update and return data
data.sort(key=operator.itemgetter(1))
self.write_to_file(path, data)
return data
def makedir(self, path):
"""
Create a folder
:param path:
:return:
"""
if not os.path.exists(path):
os.makedirs(path)
return path
else:
path = "{0}_{1}".format(path, datetime.datetime.now())
os.makedirs(path)
return path
def has_space(self, path):
"""
Check if a file has space, that is, it does not exceed 20mb
:param path:
:return:
"""
if not os.path.exists(path):
return True
statinfo = os.stat(path)
size = statinfo.st_size >> 20
return size < 20
def merge_data(self, data_a, data_b):
"""
Combine the data from multiple JSON sources into one object
:param data_a:
:param data_b:
:return:
"""
for datum1 in data_a:
key1 = datum1[0]
found = False
for datum2 in data_b:
key2 = datum2[0]
if key1 == key2:
found = True
break
if found == False:
data_b.append(datum1)
return data_b
def is_file(self, path):
"""
Determine if file/folder exists
:param path:
:return:
"""
return os.path.exists(path)
def delete_file(self, path):
"""
Delete a file
:param path:
:return:
"""
ph = ProcessHandler()
if self.is_file(path):
ph.run_command("rm '{0}'".format(path))
def move_file(self, path_old, path_new):
"""
Move a file
:param path_old:
:param path_new:
:return:
"""
ph = ProcessHandler()
ph.run_command("mv '{0}' '{1}'".format(path_old, path_new))
def copy_file(self, path_old, path_new):
"""
Copy a file
:param path_old:
:param path_new:
:return:
"""
ph = ProcessHandler()
ph.run_command("cp '{0}' '{1}'".format(path_old, path_new))
| 5,502 | 26.653266 | 99 |
py
|
graph-ismorphism
|
graph-ismorphism-master/program/handlers/__init__.py
| 0 | 0 | 0 |
py
|
|
graph-ismorphism
|
graph-ismorphism-master/program/handlers/exceptionhandler.py
|
#! /usr/bin/python2.7
"""
Exception handlers
"""
class TimeoutError(Exception):
pass
def signal_handler(signum, frame):
raise TimeoutError()
| 151 | 12.818182 | 34 |
py
|
CTDGMR
|
CTDGMR-master/reduction.py
|
import os
import pickle
import pyreadr
import argparse
import numpy as np
from CTDGMR.minCTD import GMR_CTD
from CTDGMR.optGMR import GMR_opt_BFGS
# stop the warning message in initialization
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter("ignore", category=ConvergenceWarning)
# Demo for Gaussian mixture reduction to reduce mixture of order N to order K in d dimensional space
N = 10
d = 2
# Set the value for original mixture
# The parameter values are randomly generated for domenstration purposes
np.random.seed(1)
base_means = np.random.randn(N*d).reshape((N, d)) # float array with dimension (N, d)
base_covs = np.empty((N, d, d))
for i in range(N):
base_cov = np.random.randn(d, d)
base_cov = base_cov.dot(base_cov.T)
base_cov += 0.5 * np.eye(d)
base_covs[i] = base_cov
# float array with dimension (N, d, d)
base_weights = np.random.uniform(0, 1, N)
base_weights /= base_weights.sum() # float array with dimension (N, )
# Set the order of the reduced mixture
K = 5
#####################
# perform reduction
#####################
# Approach 1: minimium ISE in Williams 2006
obj = np.Inf
for i in range(1): # speiciy the number of initial values by kmeans
reduced_mix = GMR_opt_BFGS(base_means,
base_covs,
base_weights,
K,
loss='ISE',
init_method='kmeans',
tol=1e-8,
random_state=i)
reduced_mix.run()
if reduced_mix.obj < obj:
reduction = reduced_mix
obj = reduced_mix.obj
# Approach 2: Reduction by our proposed CTD based approach
cost_function = 'KL' # cost function in CTD
reg = 0.0 # strength of lambda in the regularized CTD
obj = np.Inf
for i in range(5): # speiciy the number of initial values by kmeans
reduced_mix = GMR_CTD(base_means,
base_covs,
base_weights,
K,
n_pseudo=1,
init_method='kmeans',
ground_distance=cost_function,
reg=reg,
max_iter=1000,
tol=1e-5,
random_state=i)
reduced_mix.iterative()
if reduced_mix.obj < obj:
reduction = reduced_mix
obj = reduced_mix.obj
# Get the parameter values of the reduced mixture
reduced_means = reduction.reduced_means
reduced_covs = reduction.reduced_covs
reduced_weights = reduction.reduced_weights
print(reduced_means)
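# Optional sanity check (an illustrative sketch, assuming CTDGMR.distance.GMM_L2
# is importable from this location): GMM_L2 computes the squared L2 distance
# between two mixtures, so smaller values indicate a closer reduced mixture.
from CTDGMR.distance import GMM_L2
l2_gap = GMM_L2([base_means, reduced_means],
                [base_covs, reduced_covs],
                [base_weights, reduced_weights])
print('squared L2 distance between original and reduced mixture:', l2_gap)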
| 2,700 | 31.154762 | 100 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/greedy.py
|
import ot
import time
import numpy as np
from scipy import linalg
from .distance import GMM_L2, GMM_CTD, Gaussian_distance  # Gaussian_distance is needed by the "W" merge criterion
def moment_preserving_merge(w1, mu1, cov1, w2, mu2, cov2):
w11, w21 = w1 / (w1 + w2), w2 / (w1 + w2)
mu = w11 * mu1 + w21 * mu2
cov = w11 * cov1 + w21 * cov2 + w11 * w21 * (mu1 - mu2).dot((mu1 - mu2).T)
weight = w1 + w2
return mu, cov, weight
def wbarycenter_merge(w1, mu1, cov1, w2, mu2, cov2):
w11, w21 = w1 / (w1 + w2), w2 / (w1 + w2)
mu = w11 * mu1 + w21 * mu2
cov = w11**2 * cov1 + w21**2 * cov2 + w11 * w21 * (
linalg.sqrtm(cov2.dot(cov1)) + linalg.sqrtm(cov1.dot(cov2)))
weight = w1 + w2
return mu, cov, weight
def bound_on_KL(w1, cov1, w2, cov2, merged_cov):
d = 0.5 * ((w1 + w2) * np.sum(np.log(linalg.eigvals(merged_cov))) -
w1 * np.sum(np.log(linalg.eigvals(cov1))) -
w2 * np.sum(np.log(linalg.eigvals(cov2))))
return d
"""
Greedy algorithm for Gaussian mixture reduction
"""
def GMR_greedy(means, covs, weights, n_components, method="Salmond"):
"""Find a GMM with n_components that is closest
    to the GMM parameterized by means, covs, weights, using a greedy pairwise-merge algorithm
Parameters
----------
means : numpy array, (N, d)
covs : numpy array, (N, d, d)
weights: numpy array, (N, )
n_components: integer>=1
method: string: "Salmond", "Runnalls", "W", "Williams"
Returns
-------
weights and support points of reduced GMM.
"""
means = np.copy(means)
covs = np.copy(covs)
weights = np.copy(weights)
N, d = means.shape
M = n_components
if method == "Salmond":
# compute mean and covariance of the original mixture
mu = np.sum(weights.reshape((-1, 1)) * means, axis=0)
P = np.sum(weights.reshape((-1, 1, 1)) * covs, axis=0) + np.trace(
np.diag(weights).dot((means - mu).dot((means - mu).T)))
while N > M:
distances = {}
for i in range(N - 1):
for j in range(i + 1, N):
delta_W = (weights[i] * weights[j]) / (
weights[i] + weights[j]) * (means[i] - means[j]).dot(
(means[i] - means[j]).T)
distances[(i, j)] = np.trace(np.linalg.inv(P).dot(delta_W))
i, j = list(distances.keys())[np.array(list(
distances.values())).argmin()]
means[i], covs[i], weights[i] = moment_preserving_merge(
weights[i], means[i], covs[i], weights[j], means[j], covs[j])
means = np.delete(means, j, 0)
covs = np.delete(covs, j, 0)
weights = np.delete(weights, j)
N -= 1
elif method == "Runnalls" or method == "Williams":
while N > M:
distances = {}
merged = {}
for i in range(N - 1):
for j in range(i + 1, N):
mu, cov, w = moment_preserving_merge(
weights[i], means[i], covs[i], weights[j], means[j],
covs[j])
merged[(i, j)] = [mu, cov, w]
if method == "Runnalls":
distances[(i,
j)] = bound_on_KL(weights[i], covs[i],
weights[j], covs[j], cov)
elif method == "Williams":
distances[(i, j)] = GMM_L2(
[means[[i, j]], mu.reshape(1, d)],
[covs[[i, j]], cov.reshape(1, d, d)],
[weights[[i, j]], w.reshape(-1, )])
i, j = list(distances.keys())[np.array(list(
distances.values())).argmin()]
means[i], covs[i], weights[i] = merged[(i, j)]
means = np.delete(means, j, 0)
covs = np.delete(covs, j, 0)
weights = np.delete(weights, j)
N -= 1
elif method == "W":
while N > M:
distances = {}
for i in range(N - 1):
for j in range(i + 1, N):
distances[(i,
j)] = Gaussian_distance(means[i], means[j],
covs[i], covs[j],
"W2")**2
i, j = list(distances.keys())[np.array(list(
distances.values())).argmin()]
means[i], covs[i], weights[i] = wbarycenter_merge(
weights[i], means[i], covs[i], weights[j], means[j], covs[j])
means = np.delete(means, j, 0)
covs = np.delete(covs, j, 0)
weights = np.delete(weights, j)
N -= 1
else:
raise ValueError("This method is not implemented!")
return means.astype(float), covs.astype(float), weights.astype(float)
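# Illustrative usage sketch (commented out): reducing an order-N mixture to 5
# components with the Runnalls criterion. The arrays are assumed to have
# shapes (N, d), (N, d, d) and (N,):
#
#     new_means, new_covs, new_weights = GMR_greedy(
#         means, covs, weights, n_components=5, method="Runnalls")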
| 4,889 | 37.503937 | 79 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/optGMR.py
|
import ot
import time
import warnings
import numpy as np
from scipy import linalg
from scipy import optimize
from scipy.special import logsumexp, softmax
from scipy.stats import multivariate_normal
from numpy.linalg import det
from cvxopt import matrix
from cvxopt import solvers
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from .greedy import *
from .utils import *
from .distance import *
###########################################
# objective and gradients
###########################################
def opt_obj(reduced_means,
reduced_covs_chol,
reduced_weights,
means,
covs,
weights,
chol=True,
loss='ISE'):
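    # With SRR[i, j] = N(mu_i - mu_j; 0, Sigma_i + Sigma_j) over reduced components
    # and SOR[i, j] the analogous cross term between original component i and
    # reduced component j, the returned objectives are
    #   ISE  = w_r' SRR w_r - 2 w' SOR w_r
    #   CS   = -log(w' SOR w_r) + 0.5 * log(w_r' SRR w_r)
    #   NISE = -log(w' SOR w_r) + log(w_r' SRR w_r + w' SOO w)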
if chol:
reduced_covs = np.zeros_like(reduced_covs_chol)
for i in range(reduced_means.shape[0]):
reduced_covs[i] = reduced_covs_chol[i].dot(reduced_covs_chol[i].T)
else:
reduced_covs = reduced_covs_chol
# compute the similarity matrices
SRR_diff = reduced_means[np.newaxis, :] - reduced_means[:, np.newaxis]
SRR_covs = reduced_covs[np.newaxis, :] + reduced_covs[:, np.newaxis]
SRR = np.exp(log_normal(SRR_diff, SRR_covs))
SOR_diff = reduced_means[np.newaxis, :] - means[:, np.newaxis]
SOR_covs = reduced_covs[np.newaxis, :] + covs[:, np.newaxis]
SOR = np.exp(log_normal(SOR_diff, SOR_covs))
if loss == 'NISE':
SOO_diff = means[np.newaxis, :] - means[:, np.newaxis]
SOO_covs = covs[np.newaxis, :] + covs[:, np.newaxis]
SOO = np.exp(log_normal(SOO_diff, SOO_covs))
# return the objective functions
if loss == 'CS':
return -np.log(weights.T.dot(SOR).dot(reduced_weights)) + .5 * np.log(
reduced_weights.T.dot(SRR).dot(reduced_weights))
elif loss == 'ISE':
return reduced_weights.T.dot(SRR).dot(
reduced_weights) - 2 * weights.T.dot(SOR).dot(reduced_weights)
elif loss == 'NISE':
        # we work with the logarithm version
return -np.log(weights.T.dot(SOR).dot(reduced_weights)) + np.log(
reduced_weights.T.dot(SRR).dot(reduced_weights) +
weights.T.dot(SOO).dot(weights))
# gradients wrt to reduced model parameters
def obj_grads_theta(reduced_means,
reduced_covs_chol,
reduced_weights,
means,
covs,
weights,
loss='ISE'):
"""
The gradient with respect to the subpopulation
    means and the Cholesky decomposition of the covariances
"""
reduced_covs = np.zeros_like(reduced_covs_chol)
for i in range(reduced_means.shape[0]):
reduced_covs[i] = np.dot(reduced_covs_chol[i], reduced_covs_chol[i].T)
n = means.shape[0]
m, d = reduced_means.shape
# S12
S12_diff = reduced_means[np.newaxis, :] - means[:, np.newaxis]
S12_cov = reduced_covs[np.newaxis, :] + covs[:, np.newaxis]
S12, S12_precision = log_normal(S12_diff, S12_cov, prec=True)
S12 = np.exp(S12)
# S22
S22_diff = reduced_means[np.newaxis, :] - reduced_means[:, np.newaxis]
S22_cov = reduced_covs[np.newaxis, :] + reduced_covs[:, np.newaxis]
S22, S22_precision = log_normal(S22_diff, S22_cov, prec=True)
S22 = np.exp(S22)
# S11
if loss == 'NISE':
S11_diff = means[np.newaxis, :] - means[:, np.newaxis]
S11_cov = covs[np.newaxis, :] + covs[:, np.newaxis]
S11 = np.exp(log_normal(S11_diff, S11_cov))
# gradient w.r.t. subpop means
L12_mean_std = np.einsum('ijk,ik->ij', S12_precision,
S12_diff.reshape((-1, d)))
weighted_S12 = S12 * weights[:,
np.newaxis] * reduced_weights[np.newaxis, :]
dL12dreduced_mean = L12_mean_std.reshape(
(n, m, d)) * weighted_S12[:, :, np.newaxis]
dL12dreduced_mean = -np.sum(dL12dreduced_mean, 0)
L22_mean_std = np.einsum('ijk,ik->ij', S22_precision,
S22_diff.reshape((-1, d)))
weighted_S22 = 2 * S22 * reduced_weights[:, np.newaxis] * reduced_weights[
np.newaxis, :]
dL22dreduced_mean = L22_mean_std.reshape(
(m, m, d)) * weighted_S22[:, :, np.newaxis]
dL22dreduced_mean = -np.sum(dL22dreduced_mean, 0)
# gradient w.r.t. cholesky decomposition of subpop covariances
sandwich = (np.einsum('ij,ik->ijk', L22_mean_std, L22_mean_std) -
S22_precision).reshape(m, m, d, d)
sandwich = sandwich * weighted_S22[:, :, np.newaxis, np.newaxis]
dL22dreduced_cov_chol = np.sum(sandwich, 0)
dL22dreduced_cov_chol = np.einsum('ikl,ils->iks', dL22dreduced_cov_chol,
reduced_covs_chol)
sandwich = (np.einsum('ij,ik->ijk', L12_mean_std, L12_mean_std) -
S12_precision).reshape(n, m, d, d)
sandwich = sandwich * weighted_S12[:, :, np.newaxis, np.newaxis]
dL12dreduced_cov_chol = np.sum(sandwich, 0)
dL12dreduced_cov_chol = np.einsum('ikl,ils->iks', dL12dreduced_cov_chol,
reduced_covs_chol)
if loss == 'ISE':
grad_reduced_means = dL22dreduced_mean - 2 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol - 2 * dL12dreduced_cov_chol
elif loss == 'NISE':
L11 = (weights.T).dot(S11).dot(weights)
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
grad_reduced_means = dL22dreduced_mean / (
L11 + L22) - 1 / L12 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol / (
L11 + L22) - 1 / L12 * dL12dreduced_cov_chol
elif loss == 'CS':
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
grad_reduced_means = dL22dreduced_mean / (
2 * L22) - 1 / L12 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol / (
2 * L22) - 1 / L12 * dL12dreduced_cov_chol
return np.concatenate((grad_reduced_means.reshape(
(-1, )), grad_reduced_covs_chol.reshape((-1, ))))
# gradients wrt to reduced model mixing weights
# this is only used for NISE and CS
def obj_grads_w(reduced_means,
reduced_covs,
reduced_weights,
means,
covs,
weights,
loss='ISE'):
# S12
S12_diff = reduced_means[np.newaxis, :] - means[:, np.newaxis]
S12_cov = reduced_covs[np.newaxis, :] + covs[:, np.newaxis]
S12 = log_normal(S12_diff, S12_cov)
S12 = np.exp(S12)
# S22
S22_diff = reduced_means[np.newaxis, :] - reduced_means[:, np.newaxis]
S22_cov = reduced_covs[np.newaxis, :] + reduced_covs[:, np.newaxis]
S22 = log_normal(S22_diff, S22_cov)
S22 = np.exp(S22)
# S11
if loss == 'NISE':
S11_diff = means[np.newaxis, :] - means[:, np.newaxis]
S11_cov = covs[np.newaxis, :] + covs[:, np.newaxis]
S11 = np.exp(log_normal(S11_diff, S11_cov))
dL12dw = weights.T.dot(S12)
dL22dw = 2 * reduced_weights.T.dot(S22)
# gradient w.r.t the unconstraint parameters
dL12dt = dL12dw * reduced_weights - reduced_weights * np.sum(
dL12dw * reduced_weights)
dL22dt = dL22dw * reduced_weights - reduced_weights * np.sum(
dL22dw * reduced_weights)
if loss == 'ISE':
return dL22dt - 2 * dL12dt
elif loss == 'NISE':
L11 = (weights.T).dot(S11).dot(weights)
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
return dL22dt / (L11 + L22) - 1 / L12 * dL12dt
elif loss == 'CS':
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
return dL22dt / (2 * L22) - 1 / L12 * dL12dt
# gradients wrt to reduced model parameters
def obj_grads(reduced_means,
reduced_covs_chol,
reduced_weights,
means,
covs,
weights,
loss='ISE'):
"""
The gradient with respect to the subpopulation
    means and the Cholesky decomposition of the covariances
"""
reduced_covs = np.zeros_like(reduced_covs_chol)
for i in range(reduced_means.shape[0]):
reduced_covs[i] = np.dot(reduced_covs_chol[i], reduced_covs_chol[i].T)
n = means.shape[0]
m, d = reduced_means.shape
# S12
S12_diff = reduced_means[np.newaxis, :] - means[:, np.newaxis]
S12_cov = reduced_covs[np.newaxis, :] + covs[:, np.newaxis]
S12, S12_precision = log_normal(S12_diff, S12_cov, prec=True)
S12 = np.exp(S12)
# S12_precision = S12_precision.reshape((n, m, d, d))
# S22
S22_diff = reduced_means[np.newaxis, :] - reduced_means[:, np.newaxis]
S22_cov = reduced_covs[np.newaxis, :] + reduced_covs[:, np.newaxis]
S22, S22_precision = log_normal(S22_diff, S22_cov, prec=True)
S22 = np.exp(S22)
# S22_precision = S22_precision.reshape((m, m, d, d))
# S11
if loss == 'NISE':
S11_diff = means[np.newaxis, :] - means[:, np.newaxis]
S11_cov = covs[np.newaxis, :] + covs[:, np.newaxis]
S11 = np.exp(log_normal(S11_diff, S11_cov))
# gradient w.r.t. subpop means
L12_mean_std = np.einsum('ijk,ik->ij', S12_precision,
S12_diff.reshape((-1, d)))
weighted_S12 = S12 * weights[:,
np.newaxis] * reduced_weights[np.newaxis, :]
dL12dreduced_mean = L12_mean_std.reshape(
(n, m, d)) * weighted_S12[:, :, np.newaxis]
dL12dreduced_mean = -np.sum(dL12dreduced_mean, 0)
L22_mean_std = np.einsum('ijk,ik->ij', S22_precision,
S22_diff.reshape((-1, d)))
weighted_S22 = 2 * S22 * reduced_weights[:, np.newaxis] * reduced_weights[
np.newaxis, :]
dL22dreduced_mean = L22_mean_std.reshape(
(m, m, d)) * weighted_S22[:, :, np.newaxis]
dL22dreduced_mean = -np.sum(dL22dreduced_mean, 0)
# gradient w.r.t. cholesky decomposition of subpop covariances
sandwich = (np.einsum('ij,ik->ijk', L22_mean_std, L22_mean_std) -
S22_precision).reshape(m, m, d, d)
sandwich = sandwich * weighted_S22[:, :, np.newaxis, np.newaxis]
dL22dreduced_cov_chol = np.sum(sandwich, 0)
dL22dreduced_cov_chol = np.einsum('ikl,ils->iks', dL22dreduced_cov_chol,
reduced_covs_chol)
sandwich = (np.einsum('ij,ik->ijk', L12_mean_std, L12_mean_std) -
S12_precision).reshape(n, m, d, d)
sandwich = sandwich * weighted_S12[:, :, np.newaxis, np.newaxis]
dL12dreduced_cov_chol = np.sum(sandwich, 0)
dL12dreduced_cov_chol = np.einsum('ikl,ils->iks', dL12dreduced_cov_chol,
reduced_covs_chol)
dL12dw = weights.T.dot(S12)
dL22dw = 2 * reduced_weights.T.dot(S22)
# gradient w.r.t the unconstraint parameters
dL12dt = dL12dw * reduced_weights - reduced_weights * np.sum(
dL12dw * reduced_weights)
dL22dt = dL22dw * reduced_weights - reduced_weights * np.sum(
dL22dw * reduced_weights)
if loss == 'ISE':
grad_reduced_means = dL22dreduced_mean - 2 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol - 2 * dL12dreduced_cov_chol
grad_reduced_weights = dL22dt - 2 * dL12dt
elif loss == 'NISE':
L11 = (weights.T).dot(S11).dot(weights)
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
grad_reduced_means = dL22dreduced_mean / (
L11 + L22) - 1 / L12 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol / (
L11 + L22) - 1 / L12 * dL12dreduced_cov_chol
grad_reduced_weights = dL22dt / (L11 + L22) - 1 / L12 * dL12dt
elif loss == 'CS':
L12 = (weights.T).dot(S12).dot(reduced_weights)
L22 = (reduced_weights.T).dot(S22).dot(reduced_weights)
grad_reduced_means = dL22dreduced_mean / (
2 * L22) - 1 / L12 * dL12dreduced_mean
grad_reduced_covs_chol = dL22dreduced_cov_chol / (
2 * L22) - 1 / L12 * dL12dreduced_cov_chol
grad_reduced_weights = dL22dt / (2 * L22) - 1 / L12 * dL12dt
return np.concatenate(
(grad_reduced_weights, grad_reduced_means.reshape(
(-1, )), grad_reduced_covs_chol.reshape((-1, ))))
##########################################
# optimization based method for reduction
##########################################
# this code implements the minimum ISE, NISE,
# Cauchy-Schwartz divergence between two mixtures for GMR
class GMR_opt:
"""
Find a GMM with n_components that is closest
to a GMM parameterized by means, covs, weights
    by an optimization-based method.
The distances implemented are ISE, NISE, and CS
Parameters
----------
means : numpy array, (N, d)
covs : numpy array, (N, d, d)
weights: numpy array, (N, )
Returns
-------
weights and support points of reduced GMM.
"""
def __init__(self,
means,
covs,
weights,
n,
loss='ISE',
init_method='Runnalls',
tol=1e-5,
max_iter=100,
random_state=0,
reduced_means=None,
reduced_covs=None,
reduced_weights=None):
        self.means = means
        self.covs = covs
        self.weights = weights
        # store any user-supplied initial values so that init_method='user' works
        self.reduced_means = reduced_means
        self.reduced_covs = reduced_covs
        self.reduced_weights = reduced_weights
self.tol = tol
self.max_iter = max_iter
self.origin_n = self.weights.shape[0]
self.new_n = n
self.d = means.shape[1]
self.random_state = random_state
self.converged_ = False
self.init_method = init_method
self.loss = loss
def _initialize_parameter(self):
"""Initializatin of the reduced mixture"""
if self.init_method == 'kmeans':
total_sample_size = 10000
X = rmixGaussian(self.means, self.covs, self.weights,
total_sample_size, self.random_state)[0]
gm = GaussianMixture(n_components=self.new_n,
random_state=0).fit(X)
self.reduced_means = gm.means_
self.reduced_covs = gm.covariances_
self.reduced_weights = gm.weights_
elif self.init_method == 'user':
            # user-supplied initial values were already stored in __init__
            pass
else:
self.reduced_means, self.reduced_covs, self.reduced_weights = GMR_greedy(
self.means, self.covs, self.weights, self.new_n,
self.init_method)
# self._update_H1_H2()
def _obj(self):
if self.loss == 'CS':
return -np.log(
np.dot(self.weights.T, self.H2).dot(
self.reduced_weights)) + .5 * np.log(
np.dot(self.reduced_weights.T, self.H1).dot(
self.reduced_weights))
elif self.loss == 'ISE':
return np.dot(self.reduced_weights.T, self.H1).dot(
self.reduced_weights) - 2 * np.dot(
self.weights.T, self.H2).dot(self.reduced_weights)
elif self.loss == 'NISE':
            # we work with the logarithm version
return -np.log(
np.dot(self.weights.T, self.H2).dot(
self.reduced_weights)) + np.log(
np.dot(self.reduced_weights.T, self.H1).dot(
self.reduced_weights) +
np.dot(self.weights.T, self.H3).dot(self.weights))
def _weight_update(self):
if self.loss == 'ISE':
# quadratic programming for updating w
diff = self.reduced_means[
np.newaxis, :] - self.reduced_means[:, np.newaxis]
covs = self.reduced_covs[
np.newaxis, :] + self.reduced_covs[:, np.newaxis]
self.H1 = np.exp(log_normal(diff, covs))
diff = self.reduced_means[np.newaxis, :] - self.means[:,
np.newaxis]
covs = self.reduced_covs[np.newaxis, :] + self.covs[:, np.newaxis]
self.H2 = np.exp(log_normal(diff, covs))
P = matrix(self.H1, tc='d')
q = matrix(-self.weights.T.dot(self.H2), tc='d')
G = matrix(-np.eye(self.new_n), tc='d')
h = matrix(np.zeros((self.new_n)), tc='d')
A = matrix(np.ones((1, self.new_n)), tc='d')
b = matrix(np.array([1]), tc='d')
solvers.options['show_progress'] = False
sol = solvers.qp(P, q, G, h, A, b)
self.reduced_weights = np.array(sol['x']).reshape((-1, ))
return 2 * sol['primal objective']
else:
# use BFGS method to update the weights
obj_lambda = lambda softmaxw: opt_obj(self.reduced_means,
self.reduced_covs,
softmax(softmaxw),
self.means,
self.covs,
self.weights,
chol=False,
loss=self.loss)
grad_lambda = lambda softmaxw: obj_grads_w(
self.reduced_means, self.reduced_covs, softmax(
softmaxw), self.means, self.covs, self.weights, self.loss)
res = optimize.minimize(obj_lambda,
np.log(self.reduced_weights),
method='BFGS',
jac=grad_lambda,
options={
'gtol': 1e-5,
'disp': True
})
# print(res)
self.reduced_weights = softmax(res.x)
return res.fun
# return self._obj()
def _support_update(self):
# update the mean and covariance with gradient descent
# for the covariance, optimize its cholesky decomposition
# to ensure positive definiteness
n, d = self.new_n, self.d
obj_lambda = lambda x: opt_obj(x[:n * d].reshape(
(n, d)), x[n * d:].reshape(
(n, d, d)), self.reduced_weights, self.means, self.covs, self.
weights, self.loss)
grad_lambda = lambda x: obj_grads_theta(x[:n * d].reshape((n, d)),
x[n * d:].reshape((n, d, d)),
self.reduced_weights,
self.means,
self.covs,
self.weights,
loss=self.loss)
self.reduced_covs_chol = np.zeros_like(self.reduced_covs)
for i, cov in enumerate(self.reduced_covs):
try:
cov_chol = linalg.cholesky(cov, lower=True)
except linalg.LinAlgError:
raise ValueError("covariance chol is wrong.")
self.reduced_covs_chol[i] = cov_chol
x0 = np.concatenate((self.reduced_means.reshape(
(-1, )), self.reduced_covs_chol.reshape((-1, ))))
res = optimize.minimize(obj_lambda,
x0,
method='BFGS',
jac=grad_lambda,
options={
'gtol': 1e-5,
'disp': True
})
# print(res)
self.reduced_means, self.reduced_covs_chol = res.x[:n * d].reshape(
(n, d)), res.x[n * d:].reshape((n, d, d))
for i in range(self.new_n):
self.reduced_covs[i] = self.reduced_covs_chol[i].dot(
self.reduced_covs_chol[i].T)
return res.fun
def iterative(self):
self._initialize_parameter()
obj = np.Inf
proc_time = time.time()
for n_iter in range(1, self.max_iter + 1):
# print('Iteration %d' % n_iter)
current_time = time.time()
obj_current = self._weight_update()
if min(self.reduced_weights) == 0:
warnings.warn('The reduced mixture has fewer components!')
else:
change = obj - obj_current
# print(time.time() - current_time)
current_time = time.time()
if abs(change) < self.tol:
self.converged_ = True
self.obj = obj
self.n_iter_ = n_iter
break
if change < 0.0:
raise ValueError('The objective function is increasing!')
obj = obj_current
obj_current = self._support_update()
change = obj - obj_current
# print(time.time() - current_time)
if abs(change) < self.tol:
self.converged_ = True
self.obj = obj
self.n_iter_ = n_iter
break
if change < 0.0:
raise ValueError('The objective function is increasing!')
obj = obj_current
self.time_ = time.time() - proc_time
# print(self.time_, self.n_iter_)
if not self.converged_:
warnings.warn('Did not converge. Try different init parameters, '
'or increase max_iter, tol ')
class GMR_opt_BFGS:
"""
Find a GMM with n_components that is closest
to a GMM parameterized by means, covs, weights
    by an optimization-based method.
The distances implemented are ISE, NISE, and CS
Parameters
----------
means : numpy array, (N, d)
covs : numpy array, (N, d, d)
weights: numpy array, (N, )
Returns
-------
weights and support points of reduced GMM.
"""
def __init__(self,
means,
covs,
weights,
n,
loss='ISE',
init_method='Runnalls',
tol=1e-8,
max_iter=100,
random_state=0,
means_init=None,
covs_init=None,
weights_init=None):
self.means = means
self.covs = covs
self.weights = weights
self.tol = tol
self.n = self.weights.shape[0]
self.m = n
self.d = means.shape[1]
self.random_state = random_state
self.init_method = init_method
self.loss = loss
self.reduced_means = means_init
self.reduced_covs = covs_init
self.reduced_weights = weights_init
def _initialize_parameter(self):
"""Initializatin of the reduced mixture"""
# self.H1 = np.zeros((self.new_n, self.new_n))
# self.H2 = np.zeros((self.origin_n, self.new_n))
# if self.loss == 'NISE':
# self.H3 = np.zeros((self.origin_n, self.origin_n))
if self.init_method == 'kmeans':
total_sample_size = 10000
X = rmixGaussian(self.means, self.covs, self.weights,
total_sample_size, self.random_state)[0]
gm = GaussianMixture(n_components=self.m, random_state = self.random_state, tol = 1e-6).fit(X)
self.reduced_means = gm.means_
self.reduced_covs = gm.covariances_
self.reduced_weights = gm.weights_
elif self.init_method == 'user':
pass
else:
self.reduced_means, self.reduced_covs, self.reduced_weights = GMR_greedy(
self.means, self.covs, self.weights, self.m, self.init_method)
def run(self):
self._initialize_parameter()
proc_time = time.time()
obj_lambda = lambda x: opt_obj(x[self.m:
(self.m + self.m * self.d)].reshape(
(self.m, self.d)),
x[(self.m + self.m * self.d):].reshape(
(self.m, self.d, self.d)),
softmax(x[:self.m]),
self.means,
self.covs,
self.weights,
loss=self.loss)
grad_lambda = lambda x: obj_grads(
x[self.m:(self.m + self.m * self.d)].reshape((self.m, self.d)),
x[(self.m + self.m * self.d):].reshape((self.m, self.d, self.d)),
softmax(x[:self.m]),
self.means,
self.covs,
self.weights,
loss=self.loss)
self.reduced_covs_chol = np.zeros_like(self.reduced_covs)
for i, cov in enumerate(self.reduced_covs):
try:
cov_chol = linalg.cholesky(cov, lower=True)
except linalg.LinAlgError:
raise ValueError('covariance chol is wrong.')
self.reduced_covs_chol[i] = cov_chol
x0 = np.concatenate(
(np.log(self.reduced_weights), self.reduced_means.reshape(
(-1, )), self.reduced_covs_chol.reshape((-1, ))))
res = optimize.minimize(obj_lambda,
x0,
method='BFGS',
jac=grad_lambda,
options={'gtol': self.tol})
if res.success:
self.converged_ = True
self.obj = res.fun
self.reduced_weights = softmax(res.x[:self.m])
self.reduced_means = res.x[self.m:(self.m + self.m * self.d)].reshape(
(self.m, self.d))
self.reduced_covs = res.x[(self.m + self.m * self.d):].reshape(
(self.m, self.d, self.d))
for i, cov in enumerate(self.reduced_covs):
self.reduced_covs[i] = cov.dot(cov.T)
else:
self.converged_ = False
print(res.message)
self.res = res
self.obj = res.fun
self.reduced_weights = softmax(res.x[:self.m])
self.reduced_means = res.x[self.m:(self.m + self.m * self.d)].reshape(
(self.m, self.d))
self.reduced_covs = res.x[(self.m + self.m * self.d):].reshape(
(self.m, self.d, self.d))
for i, cov in enumerate(self.reduced_covs):
self.reduced_covs[i] = cov.dot(cov.T)
self.time_ = time.time() - proc_time
self.n_iter_ = res.nit
# print(self.time_)
if not self.converged_:
warnings.warn('Did not converge. Try different init parameters, '
'or increase max_iter, tol ')
if __name__ == '__main__':
from scipy.stats import norm
import matplotlib.pyplot as plt
means = np.array([-1.0, 2]).reshape((-1, 1))
covs = np.array([.15, .15]).reshape((-1, 1, 1))
weights = np.array([.45, .5])
M = 1
    # NOTE: the original demo referenced a GMR_L2 class that is not defined in
    # this module; GMR_opt_BFGS (defined above) is used here instead.
    reduction = GMR_opt_BFGS(means,
                             covs,
                             weights,
                             M,
                             loss='ISE',
                             init_method="kmeans",
                             tol=1e-5,
                             max_iter=100)
    reduction.run()
# visualization
reduced_means = np.squeeze(reduction.reduced_means)
reduced_covs = np.squeeze(reduction.reduced_covs)
x = np.linspace(-10, 10, 100)
y2 = dmixf(x, reduced_means, np.sqrt(reduced_covs),
reduction.reduced_weights, norm)
    reduction = GMR_opt_BFGS(means,
                             covs,
                             weights,
                             M,
                             loss='NISE',
                             init_method="kmeans",
                             tol=1e-5,
                             max_iter=100)
    reduction.run()
print(
GMM_L2([means, reduction.reduced_means],
[covs, reduction.reduced_covs],
[weights, reduction.reduced_weights]))
# visualization
reduced_means = np.squeeze(reduction.reduced_means)
reduced_covs = np.squeeze(reduction.reduced_covs)
reduced_weights = reduction.reduced_weights
y3 = dmixf(x, reduced_means, np.sqrt(reduced_covs), reduced_weights, norm)
means = np.squeeze(means)
covs = np.squeeze(covs)
y1 = dmixf(x, means, np.sqrt(covs), weights, norm)
plt.figure()
plt.plot(x, y1, label='original')
plt.plot(x, y2, label='ISE')
plt.plot(x, y3, label='NISE')
plt.legend()
plt.savefig('ISE_vs_NISE.png')
| 29,228 | 38.713315 | 106 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/utils.py
|
import ot
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from sklearn.utils import check_random_state
from scipy.stats import norm
def log_normal(diffs, covs, prec=False):
"""
    log density of zero-mean multivariate normals, evaluated at the
    difference vectors in `diffs`, one (diff, cov) pair per entry
    =====
    input:
    diffs: array-like (N, M, d)
    covs: array-like (N, M, d, d)
    prec: if true, also return the precision matrices
    output:
    log densities of shape (N, M); if prec is true, additionally the
    precision matrices of shape (N*M, d, d)
"""
n, m, d, _ = covs.shape
if d == 1:
precisions_chol = (np.sqrt(1 / covs)).reshape((n * m, d, d))
else:
precisions_chol = np.empty((n * m, d, d))
for k, cov in enumerate(covs.reshape((-1, d, d))):
try:
cov_chol = linalg.cholesky(cov, lower=True)
except linalg.LinAlgError:
raise ValueError("covariance chol is wrong.")
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(d),
lower=True).T
log_det = (np.sum(np.log(precisions_chol.reshape(n * m, -1)[:, ::d + 1]),
1))
diffs = diffs.reshape((-1, d))
y = np.einsum('ij,ijk->ik', diffs, precisions_chol)
log_prob = np.sum(np.square(y), axis=1)
log_probs = -.5 * (d * np.log(2 * np.pi) + log_prob) + log_det
if prec:
precisions = np.zeros_like(precisions_chol)
for k, precision_chol in enumerate(precisions_chol):
precisions[k] = precision_chol.dot(precision_chol.T)
return log_probs.reshape((n, m)), precisions
else:
return log_probs.reshape((n, m))
def rmixGaussian(means, covs, weights, n_samples, random_state=0):
"""
Sample from a Gaussian mixture
Parameters
----------
means : array-like, shape (n, d)
covs : array-like, shape (n, d, d)
weights : array-like, shape (n,)
Returns
-------
# n_sampels of samples from the Gaussian mixture
"""
if n_samples < 1:
raise ValueError(
"Invalid value for 'n_samples': %d . The sampling requires at "
"least one sample." % (n_components))
rng = check_random_state(random_state)
n_samples_comp = rng.multinomial(n_samples, weights)
X = np.vstack([
rng.multivariate_normal(mean, cov, int(sample))
for (mean, cov, sample) in zip(means, covs, n_samples_comp)
])
y = np.concatenate([
np.full(sample, j, dtype=int)
for j, sample in enumerate(n_samples_comp)
])
return (X, y)
def df(x, mean, sd, f):
x = x.reshape(-1, 1) - mean.T
# x = x - mean.T
x /= sd
return f.pdf(x) / sd
def dmixf(x, mean, var, w, f):
"""
Input:
x: array-like (n,)
    mean: array-like (k, )
    var: array-like (k, ), variances (standard deviations are taken internally)
    w: array-like (k, )
    Output:
    sum_k w_k * f.pdf((x - mean_k) / sd_k) / sd_k : array-like (n,)
"""
sd = np.sqrt(var)
prob = df(x, mean, sd, f)
prob *= w
return prob.sum(1)
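# Illustrative usage sketch (commented out): density of a two-component
# univariate mixture evaluated on a grid, using scipy.stats.norm as `f`.
#
#     x = np.linspace(-3.0, 3.0, 7)
#     dens = dmixf(x,
#                  mean=np.array([-1.0, 1.0]),
#                  var=np.array([0.5, 0.5]),
#                  w=np.array([0.3, 0.7]),
#                  f=norm)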
| 3,028 | 28.696078 | 77 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/__init__.py
| 0 | 0 | 0 |
py
|
|
CTDGMR
|
CTDGMR-master/CTDGMR/barycenter.py
|
import numpy as np
from scipy import linalg
from scipy import optimize
# from scipy.optimize import minimize, fsolve, root, newton_krylov
from .distance import Gaussian_distance
from .utils import log_normal
from .optGMR import GMR_opt_BFGS
def barycenter(means, covs, lambdas=None, tol=1e-7, mean_init=None, cov_init=None, ground_distance='W2'):
"""Compute the barycenter of Gaussian measures.
Parameters
----------
means : array-like, shape (n, d)
covs : array-like, shape (n, d, d)
lambdas : array-like, shape (n,), weight in barycenter
    ground_distance: string. Options: "W2", "KL", "WKL", "CS" (Cauchy-Schwartz), "ISE", "L2"
Returns
-------
mean and covariance of the Gaussian Wasserstein barycenter.
"""
m, d = means.shape
if lambdas is None:
lambdas = np.ones((m, )) / m
else:
        lambdas = lambdas / lambdas.sum()  # weight normalization
if ground_distance == 'KL' or ground_distance == 'WKL':
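        # The KL barycenter has a closed form by moment matching: the barycentric
        # mean is sum_k lambda_k mu_k and the covariance is
        # sum_k lambda_k (Sigma_k + (mu_k - mu_bar)(mu_k - mu_bar)^T),
        # which is what the lines below compute.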
barycenter_mean = np.sum((lambdas * means.T).T, axis=0)
barycenter_cov = np.sum(covs * lambdas.reshape((-1, 1, 1)), axis=0)
diff = means - barycenter_mean
barycenter_cov += np.dot(lambdas * diff.T, diff)
elif ground_distance == 'W2':
barycenter_mean = np.sum((lambdas * means.T).T, axis=0)
if d == 1:
barycenter_cov = np.sum(np.sqrt(covs) * lambdas.reshape((-1, 1, 1)))**2
else:
#Fixed point iteration for Gaussian barycenter
barycenter_cov = barycenter(means, covs, lambdas, ground_distance='KL')[1]
barycenter_cov_next = np.identity(d)
while np.linalg.norm(barycenter_cov_next - barycenter_cov, 'fro') > tol:
barycenter_cov = barycenter_cov_next
sqrt_barycenter_cov = linalg.sqrtm(barycenter_cov)
barycenter_cov_next = np.zeros((d, d))
for k in range(m):
barycenter_cov_next = barycenter_cov_next + lambdas[k] * linalg.sqrtm(
sqrt_barycenter_cov @ covs[k] @ sqrt_barycenter_cov)
elif ground_distance == 'CS':
# find the barycenter w.r.t. Cauchy-Schwartz divergence
# using fixed point iteration
def compute_sigma(covs, mus, cov, lambdas):
# find (Sigma_r+Sigma)^{-1}
covs = covs + cov
for i, cov in enumerate(covs):
covs[i] = np.linalg.inv(cov)
# find (Sigma_r+Sigma)^{-1}(mu_r-mu)
mu = compute_mean(covs, mus, cov, lambdas)
mus = mus - mu
weighted_mus = np.einsum('ijk,ik->ij', covs, mus)
sandwich = np.einsum('ij,ik->ijk', weighted_mus, weighted_mus)
return mu, 2 * ((covs - sandwich) * lambdas[:, np.newaxis, np.newaxis]).sum(0)
def compute_mean(precisions, mus, cov, lambdas):
# precisions are: (Sigma_r+Sigma)^{-1}
# find sum_{r}lambda_r(Sigma_r+Sigma)^{-1}
weighted_precisions = precisions * lambdas[:, np.newaxis, np.newaxis]
# find sum_{r}lambda_r(Sigma_r+Sigma)^{-1}mu_r
weighted_mus = np.einsum('ijk,ik->ij', weighted_precisions, mus)
weighted_mus = weighted_mus.sum(0)
return np.linalg.solve(weighted_precisions.sum(0), weighted_mus)
# initial value for fixed point iteration
barycenter_mean, barycenter_cov = barycenter(means, covs, lambdas, ground_distance='KL')
barycenter_next = compute_sigma(covs, means, barycenter_cov, lambdas)
barycenter_cov_next = np.linalg.inv(barycenter_next[1])
n_iter = 0
while np.linalg.norm(barycenter_cov_next - barycenter_cov, 'fro') > tol:
n_iter += 1
barycenter_cov = barycenter_cov_next
barycenter_next = compute_sigma(covs, means, barycenter_cov, lambdas)
barycenter_cov_next = np.linalg.inv(barycenter_next[1])
barycenter_mean = barycenter_next[0]
elif ground_distance == 'ISE':
# print(mean_init.shape, cov_init.shape)
reduced_mix = GMR_opt_BFGS(means,
covs,
lambdas,
1,
loss='ISE',
init_method='user',
tol=tol,
means_init=mean_init.reshape((-1, d)),
covs_init=cov_init.reshape((-1, d, d)),
weights_init=np.array([1.0]),
random_state=0)
reduced_mix.run()
barycenter_mean = np.squeeze(reduced_mix.reduced_means)
barycenter_cov = np.squeeze(reduced_mix.reduced_covs)
elif ground_distance == 'L2':
def obj(par, means, covs, lambdas):
"""
par: shape (d+ d^2)
means: shape (N, d)
covs: shape (N, d, d)
lambdas: shape (N,)
Outputs:
sum_{n} lambdas[n]*(|4*pi*Sigma|^{-1/2}
- 2 phi(mu| means[n], Sigma + covs[n]))
"""
# standardize the weights
# lambdas /= lambdas.sum()
n, d = means.shape
mean, cov_chol = par[:d], par[d:].reshape((d, d))
eigvals = np.zeros(n)
for i, origin_cov in enumerate(covs):
eigvals[i] = np.linalg.eigvals(origin_cov).prod()
if np.iscomplex(np.linalg.eigvals(cov_chol)).sum() > 0:
return np.Inf
else:
cov = cov_chol.dot(cov_chol.T)
diff = means - mean # shape (N, d)
covs = covs + cov # shape (N, d, d)
precisions_chol = np.zeros_like(covs)
for k, sigma in enumerate(covs.reshape((-1, d, d))):
try:
sigma_chol = linalg.cholesky(sigma, lower=True)
except linalg.LinAlgError:
raise ValueError('covariance chol is wrong.')
precisions_chol[k] = linalg.solve_triangular(sigma_chol, np.eye(d),
lower=True).T
log_det = (np.sum(np.log(precisions_chol.reshape(n, -1)[:, ::d + 1]), 1))
y = np.einsum('ij,ijk->ik', diff, precisions_chol)
log_prob = np.sum(np.square(y), axis=1)
log_probs = -.5 * (d * np.log(2 * np.pi) + log_prob) + log_det
probs = np.exp(log_probs)
return np.sum(
lambdas * np.sqrt((4 * np.pi)**(-d / 2) / np.linalg.eigvals(cov_chol).prod() +
(4 * np.pi)**(-d / 2) * eigvals**(-1 / 2) - 2 * probs))
def grad(par, means, covs, lambdas):
n, d = means.shape
mean, cov_chol = par[:d], par[d:].reshape((d, d))
cov = cov_chol.dot(cov_chol.T)
if np.iscomplex(np.linalg.eigvals(cov_chol)).sum() > 0:
return 1e8 * np.ones(d + d**2)
else:
diff = means - mean # shape (N, d)
covs = covs + cov # shape (N, d, d)
precisions_chol = np.zeros_like(covs)
for k, sigma in enumerate(covs.reshape((-1, d, d))):
try:
sigma_chol = linalg.cholesky(sigma, lower=True)
except linalg.LinAlgError:
raise ValueError('covariance chol is wrong.')
precisions_chol[k] = linalg.solve_triangular(sigma_chol, np.eye(d),
lower=True).T
log_det = (np.sum(np.log(precisions_chol.reshape(n, -1)[:, ::d + 1]), 1))
y = np.einsum('ij,ijk->ik', diff, precisions_chol)
log_prob = np.sum(np.square(y), axis=1)
log_probs = -.5 * (d * np.log(2 * np.pi) + log_prob) + log_det
probs = np.exp(log_probs)
precisions = np.stack([prec_chol.dot(prec_chol.T) for prec_chol in precisions_chol])
# partial derivative w.r.t. mean
diff_std = np.einsum('ijk,ik->ij', precisions, diff)
weighted_probs = probs * lambdas
dLdmu = (diff_std.T * weighted_probs).T
dLdmu = -2 * np.sum(dLdmu, 0)
# partial derivative w.r.t. covariance
sandwich = np.einsum('ij,ik->ijk', diff_std, diff_std)
sandwich -= precisions
dLdSigma = -2 * np.sum(sandwich * weighted_probs.reshape((-1, 1, 1)), 0)
dLdSigma = dLdSigma.dot(cov_chol)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(d), lower=True).T
dLdSigma -= np.sum(lambdas) * (4 * np.pi)**(
-d / 2) / np.linalg.eigvals(cov_chol).prod() * prec_chol
return np.concatenate((dLdmu, dLdSigma.reshape((-1, ))))
obj_lambda = lambda x: obj(x, means, covs, lambdas)
# non-convex, try multiple initial values
barycenter_mean, barycenter_cov = barycenter(means, covs, lambdas, ground_distance='KL')
barycenter_cholesky = linalg.cholesky(barycenter_cov, lower=True)
x0 = np.concatenate((barycenter_mean, barycenter_cholesky.reshape((-1, ))))
res = optimize.minimize(obj_lambda, x0, method='Nelder-Mead')
# second initial value
barycenter_mean = np.sum((lambdas * means.T).T, axis=0)
barycenter_cov = np.sum(covs * lambdas.reshape((-1, 1, 1)), axis=0)
barycenter_cholesky = linalg.cholesky(barycenter_cov, lower=True)
x0 = np.concatenate((barycenter_mean, barycenter_cholesky.reshape((-1, ))))
res2 = optimize.minimize(obj_lambda, x0, method='Nelder-Mead')
if res2.fun < res.fun:
res = res2
if res.success == True:
barycenter_mean = res.x[:d]
barycenter_chol = res.x[d:].reshape((d, d))
barycenter_cov = barycenter_chol.dot(barycenter_chol.T)
else:
print(res)
else:
        raise ValueError('This ground_distance %s is not implemented.' % ground_distance)
return barycenter_mean, barycenter_cov
# sanity check
if __name__ == '__main__':
d = 3
means = np.random.randn(4, d)
covs = np.empty((4, d, d))
for i in range(4):
a = np.random.randn(d, d)
covs[i] = a @ a.T + 0.5 * np.eye(d)
weights = np.ones(4) / 4
barycenter_mean, barycenter_cov = barycenter(means, covs, weights, ground_distance='KL')
print(barycenter_mean, barycenter_cov)
    barycenter_mean, barycenter_cov = barycenter(means, covs, weights,
                                                 ground_distance='L2')
print(barycenter_mean, barycenter_cov)
| 11,073 | 43.296 | 105 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/minCTD.py
|
import ot
import time
import warnings
import numpy as np
from scipy import linalg
from scipy import optimize
from scipy.special import logsumexp
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from .distance import GMM_L2, GMM_CTD, Gaussian_distance
from .greedy import *
from .utils import *
from .barycenter import barycenter
"""
Minimum composite transportation divergence (CTD) for GMR
Created by Qiong Zhang
"""
def entropy(log_ot_plan):
"""
The entropy of a coupling matrix
"""
return 1 - np.sum(np.exp(log_ot_plan) * log_ot_plan)
class GMR_CTD:
"""Find a GMM with n_components that is closest
to a GMM parameterized by means, covs, weights in
the composite transportation distance sense.
Parameters
----------
reg: strength of entropic regularization
Returns
-------
weights and support points of reduced GMM.
"""
def __init__(self,
means,
covs,
weights,
n,
n_pseudo=100,
init_method='kmeans',
tol=1e-5,
max_iter=100,
ground_distance='W2',
reg=0,
means_init=None,
covs_init=None,
weights_init=None,
random_state=0):
self.means = means
self.covs = covs
self.weights = weights
self.tol = tol
self.max_iter = max_iter
self.origin_n = self.weights.shape[0]
self.new_n = n
self.n_pseudo = n_pseudo
self.random_state = random_state
self.ground_distance = ground_distance
self.converged_ = False
if reg >= 0:
self.reg = reg
else:
raise ValueError('The regularization term should be non-negative.')
self.init_method = init_method
self.means_init = means_init
self.covs_init = covs_init
self.weights_init = weights_init
self.time_ = []
def _initialize_parameter(self):
"""Initializatin of the clustering barycenter"""
if self.init_method == 'kmeans':
total_sample_size = 10000
X = rmixGaussian(self.means, self.covs, self.weights, total_sample_size,
self.random_state)[0]
gm = GaussianMixture(n_components=self.new_n, random_state=self.random_state,
tol=1e-6).fit(X)
self.reduced_means = gm.means_
self.reduced_covs = gm.covariances_
self.reduced_weights = gm.weights_
elif self.init_method == 'user':
self.reduced_means = self.means_init
self.reduced_covs = self.covs_init
self.reduced_weights = self.weights_init
else:
self.reduced_means, self.reduced_covs, self.reduced_weights = GMR_greedy(
self.means, self.covs, self.weights, self.new_n, self.init_method)
self.cost_matrix = GMM_CTD(means=[self.means, self.reduced_means],
covs=[self.covs, self.reduced_covs],
weights=[self.weights, self.reduced_weights],
ground_distance=self.ground_distance,
matrix=True,
N=self.n_pseudo)
def _obj(self):
if self.reg == 0:
return np.sum(self.cost_matrix * self.ot_plan)
elif self.reg > 0:
return np.sum(self.cost_matrix * self.ot_plan) - self.reg * entropy(self.log_ot_plan)
def _weight_update(self):
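        # With reg == 0 each original component is assigned entirely to its
        # cheapest reduced component under the current cost matrix (a hard,
        # k-means-like assignment, with ties split evenly); with reg > 0 the
        # assignment is the row-wise softmax of -cost / reg scaled by the
        # original mixing weights (entropic regularization).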
if self.reg == 0:
self.clustering_matrix = (self.cost_matrix.T == np.min(self.cost_matrix, 1)).T
self.ot_plan = self.clustering_matrix * (self.weights /
self.clustering_matrix.sum(1)).reshape((-1, 1))
            # if there are ties, the weight is split equally across the tied groups
self.reduced_weights = self.ot_plan.sum(axis=0)
elif self.reg > 0:
lognum = -self.cost_matrix / self.reg
logtemp = (lognum.T - logsumexp(lognum, axis=1)).T
self.log_ot_plan = (logtemp.T + np.log(self.weights)).T
self.ot_plan = np.exp(self.log_ot_plan)
self.reduced_weights = self.ot_plan.sum(axis=0)
return self._obj()
def _support_update(self):
for i in range(self.new_n):
self.reduced_means[i], self.reduced_covs[i] = barycenter(
self.means,
self.covs,
self.ot_plan[:, i],
mean_init=self.reduced_means[i],
cov_init=self.reduced_covs[i],
ground_distance=self.ground_distance)
self.cost_matrix = GMM_CTD([self.means, self.reduced_means], [self.covs, self.reduced_covs],
[self.weights, self.reduced_weights],
ground_distance=self.ground_distance,
matrix=True,
N=self.n_pseudo)
return self._obj()
def iterative(self):
self._initialize_parameter()
obj = np.Inf
for n_iter in range(1, self.max_iter + 1):
proc_time = time.time()
obj_current = self._weight_update()
# remove the empty cluster centers
index = np.where(self.ot_plan.sum(axis=0) != 0)
self.new_n = index[0].shape[0]
self.ot_plan = self.ot_plan.T[index].T
self.reduced_means = self.reduced_means[index[0]]
self.reduced_covs = self.reduced_covs[index[0]]
self.reduced_weights = self.reduced_weights[index[0]]
change = obj - obj_current
if abs(change) < self.tol:
self.converged_ = True
self.obj = obj
self.n_iter_ = n_iter
break
if change < 0.0:
raise ValueError('Weight update: The objective function is increasing!')
obj = obj_current
obj_current = self._support_update()
change = obj - obj_current
self.time_.append(time.time() - proc_time)
if abs(change) < self.tol:
self.converged_ = True
self.obj = obj
self.n_iter_ = n_iter
break
if change < 0.0:
raise ValueError('Support update: The objective function is increasing!')
obj = obj_current
if not self.converged_:
print('Algorithm did not converge. '
'Try different init parameters, '
'or increase max_iter, tol ')
if __name__ == '__main__':
from scipy.stats import norm
import matplotlib.pyplot as plt
means = np.array([1.45, 2.2, 0.67, 0.48, 1.49, 0.91, 1.01, 1.42, 2.77, 0.89]).reshape((-1, 1))
covs = np.array(
[0.0487, 0.0305, 0.1171, 0.0174, 0.0295, 0.0102, 0.0323, 0.0380, 0.0115, 0.0679]).reshape(
(-1, 1, 1))
weights = np.array([0.03, 0.18, 0.12, 0.19, 0.02, 0.16, 0.06, 0.1, 0.08, 0.06])
print(
GMM_L2([
means,
np.array([0.48576481, 0.91249295, 1.03276885, 1.39806918, 2.20693554, 2.76902991
]).reshape((-1, 1))
], [
covs,
np.array([0.01860878, 0.01370735, 0.29000884, 0.03605234, 0.02781392, 0.0116604
]).reshape((-1, 1, 1))
], [
weights,
np.array([0.22000596, 0.23553842, 0.18454371, 0.11243351, 0.16731566, 0.08016274])
]))
reduction = GMR_CTD(
means,
covs,
weights,
5,
init_method="user",
tol=1e-5,
max_iter=100,
ground_distance="L2",
reg=0,
means_init=np.array(
[0.48576481, 0.91249295, 1.03276885, 1.39806918, 2.20693554, 2.76902991]).reshape(
(-1, 1)),
covs_init=np.array([0.01860878, 0.01370735, 0.29000884, 0.03605234, 0.02781392,
0.0116604]).reshape((-1, 1, 1)),
weights_init=np.array(
[0.22000596, 0.23553842, 0.18454371, 0.11243351, 0.16731566, 0.08016274]),
        random_state=0)
reduction.iterative()
print(
GMM_L2([means, reduction.reduced_means], [covs, reduction.reduced_covs],
[weights, reduction.reduced_weights]))
reduction2 = GMR_CTD(
means,
covs,
weights,
5,
init_method="user",
tol=1e-5,
max_iter=100,
ground_distance="SW",
reg=0,
means_init=np.array(
[0.48576481, 0.91249295, 1.03276885, 1.39806918, 2.20693554, 2.76902991]).reshape(
(-1, 1)),
covs_init=np.array([0.01860878, 0.01370735, 0.29000884, 0.03605234, 0.02781392,
0.0116604]).reshape((-1, 1, 1)),
weights_init=np.array(
[0.22000596, 0.23553842, 0.18454371, 0.11243351, 0.16731566, 0.08016274]),
        random_state=0)
reduction2.iterative()
print(
GMM_L2([means, reduction2.reduced_means], [covs, reduction2.reduced_covs],
[weights, reduction2.reduced_weights]))
# visualization
means = np.squeeze(means)
covs = np.squeeze(covs)
reduced_means = np.squeeze(reduction.reduced_means)
reduced_covs = np.squeeze(reduction.reduced_covs)
reduced_means2 = np.squeeze(reduction2.reduced_means)
reduced_covs2 = np.squeeze(reduction2.reduced_covs)
x = np.linspace(0, 3, 100)
y1 = dmixf(x, means, np.sqrt(covs), weights, norm)
y2 = dmixf(x, reduced_means, np.sqrt(reduced_covs), reduction.reduced_weights, norm)
y3 = dmixf(x, reduced_means2, np.sqrt(reduced_covs2), reduction2.reduced_weights, norm)
plt.figure()
plt.plot(x, y1, label='original')
plt.plot(x, y2, label='reduced (L2)')
    plt.plot(x, y3, label='reduced (W2)')
plt.legend()
plt.savefig('test.png')
| 10,180 | 35.102837 | 100 |
py
|
CTDGMR
|
CTDGMR-master/CTDGMR/distance.py
|
import ot
import numpy as np
from scipy import linalg
from .utils import log_normal
from scipy.stats import multivariate_normal
"""
Distance functions
Part I: distance between Gaussians
Part II: distance between Gaussian mixtures
"""
def Gaussian_distance(mu1, mu2, Sigma1, Sigma2, which='W2'):
"""
Compute distance between Gaussians.
Parameters
----------
mu1 : array-like, shape (d, )
mu2 : array-like, shape (d, )
Sigma1 : array-like, shape (d, d)
Sigma2 : array-like, shape (d, d)
which : string, 'KL', 'WKL', 'CS', 'W2' and others
Returns
-------
2-Wasserstein distance between Gaussians.
"""
if which == 'KL':
d = mu1.shape[0]
# cholesky decomposition
Sigma2_chol = linalg.cholesky(Sigma2, lower=True)
Sigma1_chol = linalg.cholesky(Sigma1, lower=True)
precisions_chol = linalg.solve_triangular(Sigma2_chol,
np.eye(d),
lower=True)
log_det = 2 * (np.sum(np.log(np.diag(Sigma2_chol))) -
np.sum(np.log(np.diag(Sigma1_chol))))
prod = precisions_chol @ Sigma1_chol
trace = np.trace(prod.T @ prod)
quadratic_term = precisions_chol.dot(mu2 - mu1)
quadratic_term = np.sum(quadratic_term**2)
return .5 * (log_det + trace + quadratic_term - d)
elif which == 'W2':
# 1 dimensional
if mu1.shape[0] == 1 or mu2.shape[0] == 1:
W2_squared = (mu1 - mu2)**2 + (np.sqrt(Sigma1) -
np.sqrt(Sigma2))**2
            W2_squared = W2_squared.item()  # np.asscalar was removed in NumPy >= 1.23
# multi-dimensional
else:
sqrt_Sigma1 = linalg.sqrtm(Sigma1)
Sigma = Sigma1 + Sigma2 - 2 * linalg.sqrtm(
sqrt_Sigma1 @ Sigma2 @ sqrt_Sigma1)
W2_squared = np.linalg.norm(mu1 - mu2)**2 + np.trace(
Sigma) + 1e-13
return np.sqrt(W2_squared)
elif which == 'L2':
det_first_two = np.linalg.det(4 * np.pi * Sigma1)**(
-1 / 2) + np.linalg.det(4 * np.pi * Sigma2)**(-1 / 2)
l2_squared = det_first_two - 2 * multivariate_normal.pdf(
mu1, mu2, Sigma1 + Sigma2)
return l2_squared
elif which == 'CS':
log_det1 = np.log(np.linalg.eigvals(4 * np.pi * Sigma1)).sum()
log_det2 = np.log(np.linalg.eigvals(4 * np.pi * Sigma2)).sum()
return -multivariate_normal.logpdf(
mu1, mu2, Sigma1 + Sigma2) - 0.25 * (log_det1 + log_det2)
    elif which == 'Hellinger':
        raise NotImplementedError('The Hellinger distance is not implemented yet.')
else:
raise ValueError('This ground distance is not implemented!')
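# Illustrative sketch (commented out): comparing two univariate Gaussians under
# different ground distances; the 1-d W2 branch expects shapes (1,) and (1, 1).
#
#     mu1, mu2 = np.array([0.0]), np.array([1.0])
#     S1, S2 = np.array([[1.0]]), np.array([[2.0]])
#     w2_dist = Gaussian_distance(mu1, mu2, S1, S2, which='W2')
#     kl_div = Gaussian_distance(mu1, mu2, S1, S2, which='KL')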
def GMM_CTD(means,
covs,
weights,
ground_distance='W2',
matrix=False,
N=1):
"""Compute the 2 Wasserstein distance between Gaussian mixtures.
Parameters
----------
means : list of numpy arrays, length 2, (k1,d), (k2,d)
covs : list of numpy arrays , length 2, (k1, d, d), (k2, d, d)
weights: list of numpy arrays
Returns
-------
Composite Wasserstein distance.
"""
mus1, mus2 = means[0], means[1]
Sigmas1, Sigmas2 = covs[0], covs[1]
k1, k2 = mus1.shape[0], mus2.shape[0]
cost_matrix = np.zeros((k1, k2))
w1, w2 = weights[0], weights[1]
if ground_distance == 'KL':
d = mus1.shape[1]
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
cost_matrix, precisions = log_normal(diff,
np.tile(
Sigmas2,
(k1, 1, 1, 1)),
prec=True)
# corresponding to log\phi(\mu_1|mu_2,\sigma_2)
# precision matrices
precisions = precisions.reshape((k1, k2, d, d))
traces = np.einsum('ijkl,ikl->ij', precisions, Sigmas1)
log_det = np.zeros(k1)
for i in range(k1):
log_det[i] = np.sum(
np.log(np.linalg.eigvals(2 * np.pi * Sigmas1[i])))
cost_matrix = 0.5 * (traces.T - log_det - d).T - cost_matrix
elif ground_distance == 'WKL':
d = mus1.shape[1]
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
cost_matrix, precisions = log_normal(diff,
np.tile(
Sigmas2,
(k1, 1, 1, 1)),
prec=True)
# precision matrices
precisions = precisions.reshape((k1, k2, d, d))
# precisions = precisions[]
traces = np.einsum('ijkl,ikl->ij', precisions, Sigmas1)
cost_matrix -= 0.5 * traces
cost_matrix = -N * cost_matrix - np.log(w2)
elif ground_distance == 'W2':
for i in range(k1):
for j in range(k2):
cost_matrix[i, j] = Gaussian_distance(
mus1[i], mus2[j], Sigmas1[i], Sigmas2[j], 'W2')**2
elif ground_distance == 'W1':
for i in range(k1):
for j in range(k2):
cost_matrix[i, j] = np.linalg.norm(
mus1[i] - mus2[j]) + np.linalg.norm(
linalg.sqrtm(Sigmas1[i]) -
linalg.sqrtm(Sigmas2[j]))
elif ground_distance == 'ISE':
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
cost_matrix = -2 * np.exp(log_normal(diff, covs))
# add determinant
col_det = np.zeros(k2)
for i in range(k2):
col_det[i] = np.linalg.det(
4 * np.pi * Sigmas2[[i]])**(-.5)
row_det = np.zeros(k1)
for i in range(k1):
row_det[i] = np.linalg.det(
4 * np.pi * Sigmas1[[i]])**(-.5)
cost_matrix += col_det
cost_matrix = (cost_matrix.T + row_det).T
elif ground_distance == 'L2':
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
cost_matrix = -2 * np.exp(log_normal(diff, covs))
# add determinant
col_det = np.zeros(k2)
for i in range(k2):
col_det[i] = np.linalg.det(
4 * np.pi * Sigmas2[[i]])**(-.5)
row_det = np.zeros(k1)
for i in range(k1):
row_det[i] = np.linalg.det(
4 * np.pi * Sigmas1[[i]])**(-.5)
cost_matrix += col_det
cost_matrix = (cost_matrix.T + row_det).T
cost_matrix = np.sqrt(cost_matrix)
elif ground_distance == 'WISE':
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
cost_matrix = -2 * np.outer(w1, w2) * np.exp(
log_normal(diff, covs))
# add determinant
col_det = np.zeros(k2)
for i in range(k2):
col_det[i] = w2[i]**2 * np.linalg.det(
4 * np.pi * Sigmas2[[i]])**(-.5)
row_det = np.zeros(k1)
for i in range(k1):
row_det[i] = w1[i]**2 * np.linalg.det(
4 * np.pi * Sigmas1[[i]])**(-.5)
cost_matrix += col_det
cost_matrix = (cost_matrix.T + row_det).T
elif ground_distance == 'CS':
d = mus1.shape[1]
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
cost_matrix = -log_normal(diff, covs) # shape (k1, k2)
# for each row, add the determinant of the covariance matrix of Sigmas1
Sigmas1_log_det = np.zeros((k1, ))
for i, cov in enumerate(Sigmas1):
Sigmas1_log_det[i] = np.sum(np.log(
np.linalg.eigvals(cov)))
# for each column, add the determinant of the covariance matrix of Sigmas2
Sigmas2_log_det = np.zeros((k2, ))
for i, cov in enumerate(Sigmas2):
Sigmas2_log_det[i] = np.sum(np.log(
np.linalg.eigvals(cov)))
cost_matrix = cost_matrix - (Sigmas1_log_det[:, np.newaxis] +
Sigmas2_log_det[np.newaxis, :]
) / 4 - d / 2 * np.log(4 * np.pi)
elif ground_distance == 'inner':
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
cost_matrix = np.exp(log_normal(diff, covs))
else:
raise ValueError('This ground distance is not implemented!')
if matrix:
return cost_matrix
else:
CTD = ot.emd2(w1, w2, cost_matrix)
return CTD
def GMM_L2(means, covs, weights, normalized=False):
# compute the squared L2 distance between two mixtures
w1, w2 = weights[0], weights[1]
mus1, mus2 = means[0], means[1]
Sigmas1, Sigmas2 = covs[0], covs[1]
# normalization of the weights
w1 /= w1.sum()
w2 /= w2.sum()
# S11
diff = mus1[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas1[np.newaxis, :] + Sigmas1[:, np.newaxis]
S11 = np.exp(log_normal(diff, covs))
# S12
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
# print(diff.shape, covs.shape)
S12 = np.exp(log_normal(diff, covs))
# S22
diff = mus2[np.newaxis, :] - mus2[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas2[:, np.newaxis]
S22 = np.exp(log_normal(diff, covs))
if normalized:
return 1 - 2 * w1.T.dot(S12).dot(w2) / (
w1.T.dot(S11).dot(w1) + w2.T.dot(S22).dot(w2))
else:
return w1.T.dot(S11).dot(
w1) - 2 * w1.T.dot(S12).dot(w2) + w2.T.dot(S22).dot(w2)
def GMM_CS(means, covs, weights):
#compute the Cauchy-Schwartz divergence between two mixtures
w1, w2 = weights[0], weights[1]
mus1, mus2 = means[0], means[1]
Sigmas1, Sigmas2 = covs[0], covs[1]
# normalization of the weights
w1 /= w1.sum()
w2 /= w2.sum()
# S11
diff = mus1[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas1[np.newaxis, :] + Sigmas1[:, np.newaxis]
S11 = np.exp(log_normal(diff, covs))
# S12
diff = mus2[np.newaxis, :] - mus1[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas1[:, np.newaxis]
# print(diff.shape, covs.shape)
S12 = np.exp(log_normal(diff, covs))
# S22
diff = mus2[np.newaxis, :] - mus2[:, np.newaxis]
covs = Sigmas2[np.newaxis, :] + Sigmas2[:, np.newaxis]
S22 = np.exp(log_normal(diff, covs))
return -np.log(w1.T.dot(S12).dot(w2)) + .5 * (
np.log(w1.T.dot(S11).dot(w1)) + np.log(w2.T.dot(S22).dot(w2)))
# sanity check
if __name__ == '__main__':
# d = 3
# means = [
# np.random.randn(4, d),
# np.random.randn(3, d)
# ]
# covs = [
# np.empty((4, d, d)),
# np.empty((3, d, d))
# ]
# for i in range(4):
# a = np.random.randn(d, d)
# covs[0][i] = a @ a.T + 0.5 * np.eye(d)
# for i in range(3):
# a = np.random.randn(d, d)
# covs[1][i] = a @ a.T + 0.5 * np.eye(d)
# weights = [
# np.random.rand(4),
# np.random.rand(3)
# ]
means = [
np.array([2.0, 2.0]).reshape((-1, 1)),
np.array([2.0, 2.0]).reshape((-1, 1))
]
covs = [
np.array([1.0, 2.0]).reshape((-1, 1, 1)),
np.array([1.0, 2.0]).reshape((-1, 1, 1))
]
weights = [np.array([0.6, 0.4]), np.array([0.5, 0.5])]
print(GMM_L2(means, covs, weights))
| 11,638 | 31.878531 | 82 |
py
|
alibi-detect
|
alibi-detect-master/setup.py
|
from setuptools import find_packages, setup
def readme():
with open("README.md", encoding="utf-8") as f:
return f.read()
# read version file
exec(open("alibi_detect/version.py").read())
extras_require = {
"prophet": [
"prophet>=1.1.0, <2.0.0",
],
"torch": [
"torch>=1.7.0, <1.14.0"
],
# https://github.com/SeldonIO/alibi-detect/issues/375 and 387
"tensorflow": [
"tensorflow_probability>=0.8.0, <0.21.0",
"tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.13.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387
],
"keops": [
"pykeops>=2.0.0, <2.2.0",
"torch>=1.7.0, <1.14.0"
],
"all": [
"prophet>=1.1.0, <2.0.0",
"tensorflow_probability>=0.8.0, <0.21.0",
"tensorflow>=2.2.0, !=2.6.0, !=2.6.1, <2.13.0", # https://github.com/SeldonIO/alibi-detect/issues/375 and 387
"pykeops>=2.0.0, <2.2.0",
"torch>=1.7.0, <1.14.0"
],
}
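# Usage note (illustrative, not part of the build metadata): the optional
# dependency groups above are installed as extras, e.g.
#   pip install alibi-detect[tensorflow]
#   pip install alibi-detect[torch]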
setup(
name="alibi-detect",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
version=__version__, # type: ignore # noqa F821
description="Algorithms for outlier detection, concept drift and metrics.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/SeldonIO/alibi-detect",
license="Apache 2.0",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8",
# lower bounds based on Debian Stable versions where available
install_requires=[
"matplotlib>=3.0.0, <4.0.0",
"numpy>=1.16.2, <2.0.0",
"pandas>=1.0.0, <3.0.0",
"Pillow>=5.4.1, <11.0.0",
"opencv-python>=3.2.0, <5.0.0",
"scipy>=1.3.0, <2.0.0",
'scikit-image>=0.14.2, !=0.17.1, <0.22', # https://github.com/SeldonIO/alibi/issues/215
"scikit-learn>=0.20.2, <2.0.0",
"transformers>=4.0.0, <5.0.0",
"dill>=0.3.0, <0.4.0",
"tqdm>=4.28.1, <5.0.0",
"requests>=2.21.0, <3.0.0",
"pydantic>=1.8.0, <2.0.0",
"toml>=0.10.1, <1.0.0", # STC, see https://discuss.python.org/t/adopting-recommending-a-toml-parser/4068
"catalogue>=2.0.0, <3.0.0",
"numba>=0.50.0, !=0.54.0, <0.58.0", # Avoid 0.54 due to: https://github.com/SeldonIO/alibi/issues/466
"typing-extensions>=3.7.4.3"
],
extras_require=extras_require,
test_suite="tests",
zip_safe=False,
classifiers=[
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering",
],
)
| 2,931 | 33.494118 | 118 |
py
|
alibi-detect
|
alibi-detect-master/testing/test_notebooks.py
|
"""
This script is an example of using `jupytext` to execute notebooks for testing instead of relying on `nbmake`
plugin. This approach may be more flexible if our requirements change in the future.
"""
import glob
from pathlib import Path
import shutil
import pytest
from jupytext.cli import jupytext
try:
from fbprophet import Prophet # noqa F401
PROPHET_INSTALLED = True
except ImportError:
PROPHET_INSTALLED = False
# Set of all example notebooks
# NOTE: we specifically get only the name of the notebook not the full path as we want to
# use these as variables on the command line for `pytest` for the workflow executing only
# changed notebooks. `pytest` does not allow `/` as part of the test name for the -k argument.
# This also means that the approach is limited to all notebooks being in the `NOTEBOOK_DIR`
# top-level path.
NOTEBOOK_DIR = 'doc/source/examples'
ALL_NOTEBOOKS = {Path(x).name for x in glob.glob(str(Path(NOTEBOOK_DIR).joinpath('*.ipynb')))}
# The following set includes notebooks which are not to be executed during notebook tests.
# These are typically notebooks that would take too long to run in a CI environment, or that are
# impractical due to other dependencies (e.g. downloading large datasets).
EXCLUDE_NOTEBOOKS = {
# the following are all long-running
'cd_distillation_cifar10.ipynb',
'cd_ks_cifar10.ipynb',
'cd_mmd_cifar10.ipynb',
'od_llr_genome.ipynb',
'od_llr_mnist.ipynb',
'od_seq2seq_synth.ipynb',
'cd_context_20newsgroup.ipynb',
'cd_context_ecg.ipynb',
'cd_text_imdb.ipynb',
'cd_mmd_keops.ipynb',
# the following requires a k8s cluster
'alibi_detect_deploy.ipynb',
# the following require downloading large datasets
'cd_online_camelyon.ipynb',
'cd_text_amazon.ipynb',
# the following require complex dependencies
'cd_mol.ipynb', # complex to install pytorch-geometric
# the following require remote artefacts to be updated
'ad_ae_cifar10.ipynb', # bad marshal data error when fetching cifar10-resnet56 model
}
if not PROPHET_INSTALLED:
EXCLUDE_NOTEBOOKS.add('od_prophet_weather.ipynb') # Exclude if fbprophet not installed i.e. on Windows
EXECUTE_NOTEBOOKS = ALL_NOTEBOOKS - EXCLUDE_NOTEBOOKS
@pytest.mark.timeout(600)
@pytest.mark.parametrize("notebook", EXECUTE_NOTEBOOKS)
def test_notebook_execution(notebook, tmp_path):
# Original notebook filepath
orig_path = Path(NOTEBOOK_DIR, notebook)
# Copy notebook to a temp directory (so that any save/loading is done in a clean directory)
test_path = tmp_path.joinpath(notebook)
shutil.copy(orig_path, test_path)
# Execute copied notebook
jupytext(args=[str(test_path), "--execute"])
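# Example invocations (illustrative; the notebook name below is a hypothetical placeholder):
#   pytest testing/test_notebooks.py                              # run all EXECUTE_NOTEBOOKS
#   pytest testing/test_notebooks.py -k "some_example.ipynb"      # run a single notebook
# As noted above, `-k` matches against the notebook *name* (not its path), which is why
# ALL_NOTEBOOKS stores names only.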
| 2,694 | 38.632353 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/base.py
|
from abc import ABC, abstractmethod
import os
import copy
import json
import numpy as np
from typing import Dict, Any, Optional, Union
from typing_extensions import Protocol, runtime_checkable
from alibi_detect.version import __version__
DEFAULT_META: Dict = {
"name": None,
"online": None, # true or false
"data_type": None, # tabular, image or time-series
"version": None,
"detector_type": None # drift, outlier or adversarial
}
def outlier_prediction_dict():
data = {
'instance_score': None,
'feature_score': None,
'is_outlier': None
}
return copy.deepcopy({"data": data, "meta": DEFAULT_META})
def adversarial_prediction_dict():
data = {
'instance_score': None,
'is_adversarial': None
}
return copy.deepcopy({"data": data, "meta": DEFAULT_META})
def adversarial_correction_dict():
data = {
'instance_score': None,
'is_adversarial': None,
'corrected': None,
'no_defense': None,
'defense': None
}
return copy.deepcopy({"data": data, "meta": DEFAULT_META})
def concept_drift_dict():
data = {
'is_drift': None,
'distance': None,
'p_val': None,
'threshold': None
}
return copy.deepcopy({"data": data, "meta": DEFAULT_META})
class BaseDetector(ABC):
"""Base class for outlier, adversarial and drift detection algorithms."""
def __init__(self):
self.meta = copy.deepcopy(DEFAULT_META)
self.meta['name'] = self.__class__.__name__
self.meta['version'] = __version__
def __repr__(self):
return self.__class__.__name__
@property
def meta(self) -> Dict:
return self._meta
@meta.setter
def meta(self, value: Dict):
if not isinstance(value, dict):
raise TypeError('meta must be a dictionary')
self._meta = value
@abstractmethod
def score(self, X: np.ndarray):
pass
@abstractmethod
def predict(self, X: np.ndarray):
pass
class FitMixin(ABC):
@abstractmethod
def fit(self, *args, **kwargs) -> None:
...
class ThresholdMixin(ABC):
@abstractmethod
def infer_threshold(self, *args, **kwargs) -> None:
...
# "Large artefacts" - to save memory these are skipped in _set_config(), but added back in get_config()
# Note: The current implementation assumes the artefact is stored as a class attribute, and as a config field under
# the same name. Refactoring will be required if this assumption is to be broken.
LARGE_ARTEFACTS = ['x_ref', 'c_ref', 'preprocess_fn']
class DriftConfigMixin:
"""
A mixin class containing methods related to a drift detector's configuration dictionary.
"""
config: Optional[dict] = None
def get_config(self) -> dict: # TODO - move to BaseDetector once config save/load implemented for non-drift
"""
Get the detector's configuration dictionary.
Returns
-------
The detector's configuration dictionary.
"""
if self.config is not None:
# Get config (stored in top-level self)
cfg = self.config
# Add large artefacts back to config
for key in LARGE_ARTEFACTS:
if key in cfg and hasattr(self._nested_detector, key):
cfg[key] = getattr(self._nested_detector, key)
# Set x_ref_preprocessed flag
# If no preprocess_at_init, always true!
preprocess_at_init = getattr(self._nested_detector, 'preprocess_at_init', True)
cfg['x_ref_preprocessed'] = preprocess_at_init and self._nested_detector.preprocess_fn is not None
return cfg
else:
            raise NotImplementedError('Getting a config (or saving via a config file) is not yet implemented for this '
'detector')
@classmethod
def from_config(cls, config: dict):
"""
Instantiate a drift detector from a fully resolved (and validated) config dictionary.
Parameters
----------
config
A config dictionary matching the schema's in :class:`~alibi_detect.saving.schemas`.
"""
# Check for existing version_warning. meta is pop'd as don't want to pass as arg/kwarg
meta = config.pop('meta', None)
meta = {} if meta is None else meta # Needed because pydantic sets meta=None if it is missing from the config
version_warning = meta.pop('version_warning', False)
# Init detector
detector = cls(**config)
# Add version_warning
detector.meta['version_warning'] = version_warning # type: ignore[attr-defined]
detector.config['meta']['version_warning'] = version_warning
return detector
def _set_config(self, inputs: dict): # TODO - move to BaseDetector once config save/load implemented for non-drift
"""
Set a detectors `config` attribute upon detector instantiation.
Large artefacts are overwritten with `None` in order to avoid memory duplication. They're added back into
the config later on by `get_config()`.
Parameters
----------
inputs
The inputs (args/kwargs) given to the detector at instantiation.
"""
# Set config metadata
name = self.__class__.__name__
# Init config dict
self.config = {
'name': name,
'meta': {
'version': __version__,
}
}
# args and kwargs
pop_inputs = ['self', '__class__', '__len__', 'name', 'meta']
[inputs.pop(k, None) for k in pop_inputs]
# Overwrite any large artefacts with None to save memory. They'll be added back by get_config()
for key in LARGE_ARTEFACTS:
if key in inputs and hasattr(self._nested_detector, key):
inputs[key] = None
self.config.update(inputs)
@property
def _nested_detector(self):
"""
The low-level nested detector.
"""
detector = self._detector if hasattr(self, '_detector') else self
detector = detector._detector if hasattr(detector, '_detector') else detector
return detector
@runtime_checkable
class Detector(Protocol):
"""Type Protocol for all detectors.
Used for typing legacy save and load functionality in `alibi_detect.saving._tensorflow.saving.py`.
Note
----
This exists to distinguish between detectors with and without support for config saving and loading. Once all
detector support this then this protocol will be removed.
"""
meta: Dict
def predict(self) -> Any: ...
@runtime_checkable
class ConfigurableDetector(Detector, Protocol):
"""Type Protocol for detectors that have support for saving via config.
Used for typing save and load functionality in `alibi_detect.saving.saving`.
"""
def get_config(self) -> dict: ...
@classmethod
def from_config(cls, config: dict): ...
def _set_config(self, inputs: dict): ...
@runtime_checkable
class StatefulDetectorOnline(ConfigurableDetector, Protocol):
"""Type Protocol for detectors that have support for save/loading of online state.
Used for typing save and load functionality in `alibi_detect.saving.saving`.
"""
t: int = 0
def save_state(self, filepath: Union[str, os.PathLike]): ...
def load_state(self, filepath: Union[str, os.PathLike]): ...
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(
obj,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
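if __name__ == "__main__":
    # Small self-contained check of NumpyEncoder (illustrative, not part of the library):
    # numpy scalars and arrays in a prediction-style dict are serialised to plain JSON types.
    example = {"is_outlier": np.array([0, 1, 0]), "instance_score": np.float32(0.25), "n": np.int64(3)}
    print(json.dumps(example, cls=NumpyEncoder))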
| 8,321 | 29.708487 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/exceptions.py
|
"""This module defines the Alibi Detect exception hierarchy and common exceptions used across the library."""
from typing_extensions import Literal
from typing import Callable
from abc import ABC
from functools import wraps
class AlibiDetectException(Exception, ABC):
def __init__(self, message: str) -> None:
"""Abstract base class of all alibi detect errors.
Parameters
----------
message
The error message.
"""
super().__init__(message)
class NotFittedError(AlibiDetectException):
def __init__(self, object_name: str) -> None:
"""Exception raised when a transform is not fitted.
Parameters
----------
object_name
The name of the unfit object.
"""
message = f'{object_name} has not been fit!'
super().__init__(message)
class ThresholdNotInferredError(AlibiDetectException):
def __init__(self, object_name: str) -> None:
"""Exception raised when a threshold not inferred for an outlier detector.
Parameters
----------
object_name
The name of the object that does not have a threshold fit.
"""
message = f'{object_name} has no threshold set, call `infer_threshold` to fit one!'
super().__init__(message)
def _catch_error(err_name: Literal['NotFittedError', 'ThresholdNotInferredError']) -> Callable:
"""Decorator to catch errors and raise a more informative error message.
Note: This decorator should only be used on detector frontend methods. It catches errors raised by
backend components and re-raises them with error messages corresponding to the specific detector frontend.
This is done to avoid exposing the backend components to the user.
"""
error_type = globals()[err_name]
def decorate(f):
@wraps(f)
def applicator(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except error_type as err:
raise error_type(self.__class__.__name__) from err
return applicator
return decorate
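if __name__ == "__main__":
    # Illustrative only: a hypothetical frontend class whose decorated method re-raises a
    # backend NotFittedError under the frontend's own class name.
    class _ExampleDetector:
        @_catch_error('NotFittedError')
        def score(self, x):
            raise NotFittedError('_ExampleBackend')

    try:
        _ExampleDetector().score(None)
    except NotFittedError as err:
        print(err)  # -> "_ExampleDetector has not been fit!"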
| 2,130 | 32.296875 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/version.py
|
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
__version__ = "0.11.5dev"
| 217 | 30.142857 | 60 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/datasets.py
|
import io
import logging
from io import BytesIO
from typing import List, Tuple, Type, Union
from xml.etree import ElementTree
import dill
import numpy as np
import pandas as pd
import requests
from alibi_detect.utils.data import Bunch
from alibi_detect.utils.url import _join_url
from requests import RequestException
from urllib.error import URLError
from scipy.io import arff
from sklearn.datasets import fetch_kddcup99
# do not extend pickle dispatch table so as not to change pickle behaviour
dill.extend(use_dill=False)
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger(__name__)
"""Number of seconds to wait for URL requests before raising an error."""
TIMEOUT = 10
def fetch_kdd(target: list = ['dos', 'r2l', 'u2r', 'probe'],
keep_cols: list = ['srv_count', 'serror_rate', 'srv_serror_rate',
'rerror_rate', 'srv_rerror_rate', 'same_srv_rate',
'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',
'dst_host_srv_count', 'dst_host_same_srv_rate',
'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',
'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',
'dst_host_srv_serror_rate', 'dst_host_rerror_rate',
'dst_host_srv_rerror_rate'],
percent10: bool = True,
return_X_y: bool = False) -> Union[Bunch, Tuple[np.ndarray, np.ndarray]]:
"""
KDD Cup '99 dataset. Detect computer network intrusions.
Parameters
----------
target
List with attack types to detect.
keep_cols
List with columns to keep. Defaults to continuous features.
percent10
Bool, whether to only return 10% of the data.
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
Returns
-------
Bunch
Dataset and outlier labels (0 means 'normal' and 1 means 'outlier').
(data, target)
Tuple if 'return_X_y' equals True.
"""
# fetch raw data
try:
data_raw = fetch_kddcup99(subset=None, data_home=None, percent10=percent10)
except URLError:
logger.exception("Could not connect, URL may be out of service")
raise
# specify columns
cols = ['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes',
'land', 'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in',
'num_compromised', 'root_shell', 'su_attempted', 'num_root', 'num_file_creations',
'num_shells', 'num_access_files', 'num_outbound_cmds', 'is_host_login',
'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate',
'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate',
'srv_diff_host_rate', 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate',
'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate',
'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate']
# create dataframe
data = pd.DataFrame(data=data_raw['data'], columns=cols)
# add target to dataframe
data['attack_type'] = data_raw['target']
# specify and map attack types
attack_list = np.unique(data['attack_type'])
attack_category = ['dos', 'u2r', 'r2l', 'r2l', 'r2l', 'probe', 'dos', 'u2r',
'r2l', 'dos', 'probe', 'normal', 'u2r', 'r2l', 'dos', 'probe',
'u2r', 'probe', 'dos', 'r2l', 'dos', 'r2l', 'r2l']
attack_types = {}
for i, j in zip(attack_list, attack_category):
attack_types[i] = j
data['attack_category'] = 'normal'
for k, v in attack_types.items():
data['attack_category'][data['attack_type'] == k] = v
# define target
data['target'] = 0
for t in target:
data['target'][data['attack_category'] == t] = 1
is_outlier = data['target'].values
# define columns to be dropped
drop_cols = []
for col in data.columns.values:
if col not in keep_cols:
drop_cols.append(col)
if drop_cols != []:
data.drop(columns=drop_cols, inplace=True)
if return_X_y:
return data.values, is_outlier
return Bunch(data=data.values,
target=is_outlier,
target_names=['normal', 'outlier'],
feature_names=keep_cols)
def load_url_arff(url: str, dtype: Type[np.generic] = np.float32) -> np.ndarray:
"""
Load arff files from url.
Parameters
----------
url
Address of arff file.
Returns
-------
Arrays with data and labels.
"""
try:
resp = requests.get(url, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
data = arff.loadarff(io.StringIO(resp.text))[0]
return np.array(data.tolist(), dtype=dtype)
def fetch_ecg(return_X_y: bool = False) \
-> Union[Bunch, Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]]:
"""
Fetch ECG5000 data. The dataset contains 5000 ECG's, originally obtained from
Physionet (https://archive.physionet.org/cgi-bin/atm/ATM) under the name
"BIDMC Congestive Heart Failure Database(chfdb)", record "chf07".
Parameters
----------
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
Returns
-------
Bunch
Train and test datasets with labels.
(train data, train target), (test data, test target)
Tuple of tuples if 'return_X_y' equals True.
"""
Xy_train = load_url_arff('https://storage.googleapis.com/seldon-datasets/ecg/ECG5000_TRAIN.arff')
X_train, y_train = Xy_train[:, :-1], Xy_train[:, -1]
Xy_test = load_url_arff('https://storage.googleapis.com/seldon-datasets/ecg/ECG5000_TEST.arff')
X_test, y_test = Xy_test[:, :-1], Xy_test[:, -1]
if return_X_y:
return (X_train, y_train), (X_test, y_test)
else:
return Bunch(data_train=X_train,
data_test=X_test,
target_train=y_train,
target_test=y_test)
def fetch_cifar10c(corruption: Union[str, List[str]], severity: int, return_X_y: bool = False) \
-> Union[Bunch, Tuple[np.ndarray, np.ndarray]]:
"""
Fetch CIFAR-10-C data. Originally obtained from https://zenodo.org/record/2535967#.XkKh2XX7Qts and
introduced in "Hendrycks, D and Dietterich, T.G. Benchmarking Neural Network Robustness to Common Corruptions
    and Perturbations. In 7th International Conference on Learning Representations, 2019.".
Parameters
----------
corruption
Corruption type. Options can be checked with `get_corruption_cifar10c()`.
Alternatively, specify 'all' for all corruptions at a severity level.
severity
Severity level of corruption (1-5).
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
Returns
-------
Bunch
Corrupted dataset with labels.
(corrupted data, target)
Tuple if 'return_X_y' equals True.
"""
url = 'https://storage.googleapis.com/seldon-datasets/cifar10c/'
n = 10000 # instances per corrupted test set
istart, iend = (severity - 1) * n, severity * n # idx for the relevant severity level
corruption_list = corruption_types_cifar10c() # get all possible corruption types
# convert input to list
if isinstance(corruption, str) and corruption != 'all':
corruption = [corruption]
elif corruption == 'all':
corruption = corruption_list
for corr in corruption: # check values in corruptions
if corr not in corruption_list:
raise ValueError(f'{corr} is not a valid corruption type.')
# get corrupted data
shape = ((len(corruption)) * n, 32, 32, 3)
X = np.zeros(shape)
for i, corr in enumerate(corruption):
url_corruption = _join_url(url, corr + '.npy')
try:
resp = requests.get(url_corruption, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
X_corr = np.load(BytesIO(resp.content))[istart:iend].astype('float32')
X[i * n:(i + 1) * n] = X_corr
# get labels
url_labels = _join_url(url, 'labels.npy')
try:
resp = requests.get(url_labels, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
y = np.load(BytesIO(resp.content))[istart:iend].astype('int64')
if X.shape[0] != y.shape[0]:
repeat = X.shape[0] // y.shape[0]
y = np.tile(y, (repeat,))
if return_X_y:
return (X, y)
else:
return Bunch(data=X, target=y)
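# Example call (illustrative; 'gaussian_noise' is assumed to be one of the corruption names
# returned by `corruption_types_cifar10c()`, and network access is required):
#   X_corr, y = fetch_cifar10c(corruption='gaussian_noise', severity=5, return_X_y=True)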
def google_bucket_list(url: str, folder: str, filetype: str = None, full_path: bool = False) -> List[str]:
"""
Retrieve list with items in google bucket folder.
Parameters
----------
url
Bucket directory.
folder
Folder to retrieve list of items from.
filetype
File extension, e.g. `npy` for saved numpy arrays.
Returns
-------
List with items in the folder of the google bucket.
"""
try:
resp = requests.get(url, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
root = ElementTree.fromstring(resp.content)
bucket_list = []
for r in root:
if list(r):
filepath = r[0].text
if filetype is not None:
if filepath.startswith(folder) and filepath.endswith(filetype):
istart, istop = filepath.find('/') + 1, filepath.find('.')
bucket_list.append(filepath[istart:istop])
else:
if filepath.startswith(folder):
istart, istop = filepath.find('/') + 1, filepath.find('.')
bucket_list.append(filepath[istart:istop])
return bucket_list
def corruption_types_cifar10c() -> List[str]:
"""
Retrieve list with corruption types used in CIFAR-10-C.
Returns
-------
List with corruption types.
"""
url = 'https://storage.googleapis.com/seldon-datasets/'
folder = 'cifar10c'
filetype = 'npy'
corruption_types = google_bucket_list(url, folder, filetype)
corruption_types.remove('labels')
return corruption_types
def fetch_attack(dataset: str, model: str, attack: str, return_X_y: bool = False) \
-> Union[Bunch, Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]]:
"""
Load adversarial instances for a given dataset, model and attack type.
Parameters
----------
dataset
Dataset under attack.
model
Model under attack.
attack
Attack name.
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
Returns
-------
Bunch
Adversarial instances with original labels.
(train data, train target), (test data, test target)
Tuple of tuples if 'return_X_y' equals True.
"""
# define paths
url = 'https://storage.googleapis.com/seldon-datasets/'
path_attack = _join_url(url, [dataset, 'attacks', model, attack])
path_data = path_attack + '.npz'
path_meta = path_attack + '_meta.pickle'
# get adversarial instances and labels
try:
resp = requests.get(path_data, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
data = np.load(BytesIO(resp.content))
X_train, X_test = data['X_train_adv'], data['X_test_adv']
y_train, y_test = data['y_train'], data['y_test']
if return_X_y:
return (X_train, y_train), (X_test, y_test)
# get metadata
try:
resp = requests.get(path_meta, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
meta = dill.load(BytesIO(resp.content))
return Bunch(data_train=X_train,
data_test=X_test,
target_train=y_train,
target_test=y_test,
meta=meta)
def fetch_nab(ts: str,
return_X_y: bool = False
) -> Union[Bunch, Tuple[pd.DataFrame, pd.DataFrame]]:
"""
Get time series in a DataFrame from the Numenta Anomaly Benchmark: https://github.com/numenta/NAB.
Parameters
----------
    ts
        Name of the time series to fetch (one of the names returned by `get_list_nab()`).
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
Returns
-------
Bunch
Dataset and outlier labels (0 means 'normal' and 1 means 'outlier') in DataFrames with timestamps.
(data, target)
Tuple if 'return_X_y' equals True.
"""
url_labels = 'https://raw.githubusercontent.com/numenta/NAB/master/labels/combined_labels.json'
try:
resp = requests.get(url_labels, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
labels_json = resp.json()
outliers = labels_json[ts + '.csv']
if not outliers:
logger.warning('The dataset does not contain any outliers.')
url = 'https://raw.githubusercontent.com/numenta/NAB/master/data/' + ts + '.csv'
df = pd.read_csv(url, header=0, index_col=0)
labels = np.zeros(df.shape[0])
for outlier in outliers:
outlier_id = np.where(df.index == outlier)[0][0]
labels[outlier_id] = 1
df.index = pd.to_datetime(df.index)
df_labels = pd.DataFrame(data={'is_outlier': labels}, index=df.index)
if return_X_y:
return df, df_labels
return Bunch(data=df,
target=df_labels,
target_names=['normal', 'outlier'])
def get_list_nab() -> list:
"""
Get list of possible time series to retrieve from the Numenta Anomaly Benchmark: https://github.com/numenta/NAB.
Returns
-------
List with time series names.
"""
url_labels = 'https://raw.githubusercontent.com/numenta/NAB/master/labels/combined_labels.json'
try:
resp = requests.get(url_labels, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
labels_json = resp.json()
files = [k[:-4] for k, v in labels_json.items()]
return files
def load_genome_npz(fold: str, return_labels: bool = False) \
-> Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray]]:
url = 'https://storage.googleapis.com/seldon-datasets/genome/'
path_data = _join_url(url, fold + '.npz')
try:
resp = requests.get(path_data, timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
data = np.load(BytesIO(resp.content))
if return_labels:
return data['x'], data['is_outlier'], data['y']
else:
return data['x'], data['is_outlier']
def fetch_genome(return_X_y: bool = False, return_labels: bool = False) -> Union[Bunch, tuple]:
"""
Load genome data including their labels and whether they are outliers or not. More details about the data can be
found in the readme on https://console.cloud.google.com/storage/browser/seldon-datasets/genome/.
The original data can be found here: https://drive.google.com/drive/folders/1Ht9xmzyYPbDouUTl_KQdLTJQYX2CuclR.
Parameters
----------
return_X_y
Bool, whether to only return the data and target values or a Bunch object.
return_labels
Whether to return the genome labels which are detailed in the `label_json` dict
of the returned Bunch object.
Returns
-------
Bunch
Training, validation and test data, whether they are outliers and optionally including the
genome labels which are specified in the `label_json` key as a dictionary.
(data, outlier) or (data, outlier, target)
Tuple for the train, validation and test set with either the data and whether they
are outliers or the data, outlier flag and labels for the genomes if 'return_X_y' equals True.
"""
data_train = load_genome_npz('train_in', return_labels=return_labels)
data_val_in = load_genome_npz('val_in', return_labels=return_labels)
data_val_ood = load_genome_npz('val_ood', return_labels=return_labels)
data_val = (
np.concatenate([data_val_in[0], data_val_ood[0]]),
np.concatenate([data_val_in[1], data_val_ood[1]])
)
data_test_in = load_genome_npz('test_in', return_labels=return_labels)
data_test_ood = load_genome_npz('test_ood', return_labels=return_labels)
data_test = (
np.concatenate([data_test_in[0], data_test_ood[0]]),
np.concatenate([data_test_in[1], data_test_ood[1]])
)
if return_labels:
data_val += (np.concatenate([data_val_in[2], data_val_ood[2]]),) # type: ignore
data_test += (np.concatenate([data_test_in[2], data_test_ood[2]]),) # type: ignore
if return_X_y:
return data_train, data_val, data_test
try:
resp = requests.get('https://storage.googleapis.com/seldon-datasets/genome/label_dict.json', timeout=TIMEOUT)
resp.raise_for_status()
except RequestException:
logger.exception("Could not connect, URL may be out of service")
raise
label_dict = resp.json()
bunch = Bunch(
data_train=data_train[0],
data_val=data_val[0],
data_test=data_test[0],
outlier_train=data_train[1],
outlier_val=data_val[1],
outlier_test=data_test[1],
label_dict=label_dict
)
if not return_labels:
return bunch
else:
bunch['target_train'] = data_train[2] # type: ignore
bunch['target_val'] = data_val[2] # type: ignore
bunch['target_test'] = data_test[2] # type: ignore
return bunch
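if __name__ == "__main__":
    # Illustrative usage only (requires network access; not part of the original module):
    # fetch the 10% KDD Cup '99 subset as arrays and list a few available NAB time series.
    X, y = fetch_kdd(percent10=True, return_X_y=True)
    print(X.shape, y.mean())
    print(get_list_nab()[:5])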
| 18,577 | 35.427451 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/__init__.py
|
from . import ad, cd, models, od, utils, saving
from .version import __version__ # noqa F401
__all__ = ["ad", "cd", "models", "od", "utils", "saving"]
| 153 | 29.8 | 57 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/base.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
from typing import Union
from typing_extensions import Literal, Protocol, runtime_checkable
# Use Protocols instead of base classes for the backend associated objects. This is a bit more flexible and allows us to
# avoid the torch/tensorflow imports in the base class.
@runtime_checkable
class TransformProtocol(Protocol):
"""Protocol for transformer objects.
The :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseTransformTorch` object provides abstract methods for
objects that map between `torch` tensors. This protocol models the interface of the `BaseTransformTorch`
class.
"""
def transform(self, x):
pass
@runtime_checkable
class FittedTransformProtocol(TransformProtocol, Protocol):
"""Protocol for fitted transformer objects.
This protocol models the joint interface of the :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseTransformTorch`
class and the :py:obj:`~alibi_detect.od.pytorch.ensemble.FitMixinTorch` class. These objects are transforms that
    require fitting."""
def fit(self, x_ref):
pass
def set_fitted(self):
pass
def check_fitted(self):
pass
TransformProtocolType = Union[TransformProtocol, FittedTransformProtocol]
NormalizerLiterals = Literal['PValNormalizer', 'ShiftAndScaleNormalizer']
AggregatorLiterals = Literal['TopKAggregator', 'AverageAggregator',
'MaxAggregator', 'MinAggregator']
PValNormalizer, ShiftAndScaleNormalizer, TopKAggregator, AverageAggregator, \
MaxAggregator, MinAggregator = import_optional(
'alibi_detect.od.pytorch.ensemble',
['PValNormalizer', 'ShiftAndScaleNormalizer', 'TopKAggregator',
'AverageAggregator', 'MaxAggregator', 'MinAggregator']
)
def get_normalizer(normalizer: Union[TransformProtocolType, NormalizerLiterals]) -> TransformProtocol:
if isinstance(normalizer, str):
try:
return {
'PValNormalizer': PValNormalizer,
'ShiftAndScaleNormalizer': ShiftAndScaleNormalizer,
}.get(normalizer)()
except KeyError:
raise NotImplementedError(f'Normalizer {normalizer} not implemented.')
return normalizer
def get_aggregator(aggregator: Union[TransformProtocol, AggregatorLiterals]) -> TransformProtocol:
if isinstance(aggregator, str):
try:
return {
'TopKAggregator': TopKAggregator,
'AverageAggregator': AverageAggregator,
'MaxAggregator': MaxAggregator,
'MinAggregator': MinAggregator,
}.get(aggregator)()
except KeyError:
raise NotImplementedError(f'Aggregator {aggregator} not implemented.')
return aggregator
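# Illustrative use of the factory helpers above (the string literals are those defined in
# NormalizerLiterals / AggregatorLiterals; ready-made instances are passed through unchanged):
#   normalizer = get_normalizer('ShiftAndScaleNormalizer')
#   aggregator = get_aggregator('AverageAggregator')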
| 2,818 | 36.092105 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/vaegmm.py
|
import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensorflow.losses import loss_vaegmm
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierVAEGMM(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
vaegmm: tf.keras.Model = None,
encoder_net: tf.keras.Model = None,
decoder_net: tf.keras.Model = None,
gmm_density_net: tf.keras.Model = None,
n_gmm: int = None,
latent_dim: int = None,
samples: int = 10,
beta: float = 1.,
recon_features: Callable = eucl_cosim_features,
data_type: str = None
) -> None:
"""
VAEGMM-based outlier detector.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
vaegmm
A trained tf.keras model if available.
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class if no 'vaegmm' is specified.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class if no 'vaegmm' is specified.
gmm_density_net
Layers for the GMM network wrapped in a tf.keras.Sequential class.
n_gmm
Number of components in GMM.
latent_dim
Dimensionality of the latent space.
samples
Number of samples sampled to evaluate each instance.
beta
Beta parameter for KL-divergence loss term.
recon_features
Function to extract features from the reconstructed instance by the decoder.
data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.samples = samples
# check if model can be loaded, otherwise initialize VAEGMM model
if isinstance(vaegmm, tf.keras.Model):
self.vaegmm = vaegmm
elif (isinstance(encoder_net, tf.keras.Sequential) and
isinstance(decoder_net, tf.keras.Sequential) and
isinstance(gmm_density_net, tf.keras.Sequential)):
self.vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm,
latent_dim, recon_features=recon_features, beta=beta)
else:
raise TypeError('No valid format detected for `vaegmm` (tf.keras.Model) '
'or `encoder_net`, `decoder_net` and `gmm_density_net` (tf.keras.Sequential).')
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
self.phi, self.mu, self.cov, self.L, self.log_det_cov = None, None, None, None, None
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = loss_vaegmm,
w_recon: float = 1e-7,
w_energy: float = .1,
w_cov_diag: float = .005,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
cov_elbo: dict = dict(sim=.05),
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
) -> None:
"""
Train VAEGMM model.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
w_recon
Weight on elbo loss term if default `loss_vaegmm`.
w_energy
Weight on sample energy loss term if default `loss_vaegmm` loss fn is used.
w_cov_diag
Weight on covariance regularizing loss term if default `loss_vaegmm` loss fn is used.
optimizer
Optimizer used for training.
cov_elbo
Dictionary with covariance matrix options in case the elbo loss function is used.
Either use the full covariance matrix inferred from X (dict(cov_full=None)),
only the variance (dict(cov_diag=None)) or a float representing the same standard deviation
for each feature (e.g. dict(sim=.05)).
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
# train arguments
args = [self.vaegmm, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks,
'loss_fn_kwargs': {'w_recon': w_recon,
'w_energy': w_energy,
'w_cov_diag': w_cov_diag}
}
# initialize covariance matrix if default vaegmm loss fn is used
use_elbo = loss_fn.__name__ == 'loss_vaegmm'
cov_elbo_type, cov = [*cov_elbo][0], [*cov_elbo.values()][0]
if use_elbo and cov_elbo_type in ['cov_full', 'cov_diag']:
cov = tfp.stats.covariance(X.reshape(X.shape[0], -1))
if cov_elbo_type == 'cov_diag': # infer standard deviation from covariance matrix
cov = tf.math.sqrt(tf.linalg.diag_part(cov))
if use_elbo:
kwargs['loss_fn_kwargs'][cov_elbo_type] = tf.dtypes.cast(cov, tf.float32)
# train
trainer(*args, **kwargs)
# set GMM parameters
x_recon, z, gamma = self.vaegmm(X)
self.phi, self.mu, self.cov, self.L, self.log_det_cov = gmm_params(z, gamma)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 95.,
batch_size: int = int(1e10)
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
batch_size
Batch size used when making predictions with the VAEGMM.
"""
# compute outlier scores
iscore = self.score(X, batch_size=batch_size)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Batch of instances to analyze.
batch_size
Batch size used when making predictions with the VAEGMM.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
# draw samples from latent space
X_samples = np.repeat(X, self.samples, axis=0)
_, z, _ = predict_batch(X_samples, self.vaegmm, batch_size=batch_size)
# compute average energy for samples
energy, _ = gmm_energy(z, self.phi, self.mu, self.cov, self.L, self.log_det_cov, return_mean=False)
energy_samples = energy.numpy().reshape((-1, self.samples))
iscore = np.mean(energy_samples, axis=-1)
return iscore
def predict(self,
X: np.ndarray,
batch_size: int = int(1e10),
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Batch of instances.
batch_size
Batch size used when making predictions with the VAEGMM.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and instance level outlier scores.
"""
# compute outlier scores
iscore = self.score(X, batch_size=batch_size)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
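# Typical workflow (illustrative; `encoder_net`, `decoder_net` and `gmm_density_net` are
# user-defined tf.keras.Sequential models as described in the docstrings above):
#   od = OutlierVAEGMM(encoder_net=encoder_net, decoder_net=decoder_net,
#                      gmm_density_net=gmm_density_net, n_gmm=2, latent_dim=4)
#   od.fit(X_train, epochs=5)
#   od.infer_threshold(X_train, threshold_perc=95.)
#   preds = od.predict(X_test, return_instance_score=True)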
| 9,744 | 37.98 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_svm.py
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from alibi_detect.base import (BaseDetector, FitMixin, ThresholdMixin,
outlier_prediction_dict)
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.pytorch import SgdSVMTorch, BgdSVMTorch
from alibi_detect.utils._types import Literal
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
backends = {
'pytorch': {
'sgd': SgdSVMTorch,
'bgd': BgdSVMTorch
}
}
class SVM(BaseDetector, ThresholdMixin, FitMixin):
def __init__(
self,
nu: float,
n_components: Optional[int] = None,
kernel: 'torch.nn.Module' = None,
optimization: Literal['sgd', 'bgd'] = 'sgd',
backend: Literal['pytorch'] = 'pytorch',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""One-Class Support vector machine (OCSVM) outlier detector.
The one-class Support vector machine outlier detector fits a one-class SVM to the reference data.
Rather than the typical approach of optimizing the exact kernel OCSVM objective through a dual formulation,
here we instead map the data into the kernel's RKHS and then solve the linear optimization problem
directly through its primal formulation. The Nystroem approximation is used to speed up training and inference
by approximating the kernel's RKHS.
We provide two options, specified by the `optimization` parameter, for optimizing the one-class svm. `''sgd''`
wraps the `SGDOneClassSVM` class from the sklearn package and the other, `''bgd''` uses a custom implementation
in PyTorch. The PyTorch approach is tailored for operation on GPUs. Instead of applying stochastic gradient
descent (one data point at a time) with a fixed learning rate schedule it performs full gradient descent with
step size chosen at each iteration via line search. Note that on a CPU this would not necessarily be preferable
to SGD as we would have to iterate through both data points and candidate step sizes, however on GPU all of the
operations are vectorized/parallelized. Moreover, the Nystroem approximation has complexity `O(n^2m)` where
`n` is the number of reference instances and `m` defines the number of inducing points. This can therefore be
expensive for large reference sets and benefits from implementation on the GPU.
        In general, if using a small dataset then using the `''cpu''` with the optimization `''sgd''` is the best
        choice, whereas if using a large dataset then using the `''gpu''` with the optimization `''bgd''` is the best choice.
Parameters
----------
nu
The proportion of the training data that should be considered outliers. Note that this does not necessarily
correspond to the false positive rate on test data, which is still defined when calling the
`infer_threshold` method. `nu` should be thought of as a regularization parameter that affects how smooth
the svm decision boundary is.
n_components
Number of components in the Nystroem approximation, By default uses all of them.
kernel
Kernel function to use for outlier detection. Should be an instance of a subclass of `torch.nn.Module`. If
not specified then defaults to the `GaussianRBF`.
optimization
Optimization method to use. Choose from ``'sgd'`` or ``'bgd'``. Defaults to ``'sgd'``.
backend
Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
Raises
------
NotImplementedError
If choice of `backend` is not implemented.
ValueError
If choice of `optimization` is not valid.
ValueError
If `n_components` is not a positive integer.
"""
super().__init__()
if optimization not in ('sgd', 'bgd'):
raise ValueError(f'Optimization {optimization} not recognized. Choose from `sgd` or `bgd`.')
if n_components is not None and n_components <= 0:
raise ValueError(f'n_components must be a positive integer, got {n_components}.')
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
backend_cls = backends[backend][optimization]
args: Dict[str, Any] = {
'n_components': n_components,
'kernel': kernel,
'nu': nu
}
args['device'] = device
self.backend = backend_cls(**args)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(
self,
x_ref: np.ndarray,
tol: float = 1e-6,
max_iter: int = 1000,
step_size_range: Tuple[float, float] = (1e-8, 1.0),
n_step_sizes: int = 16,
n_iter_no_change: int = 25,
verbose: int = 0,
) -> None:
"""Fit the detector on reference data.
Uses the choice of optimization method to fit the svm model to the data.
Parameters
----------
x_ref
Reference data used to fit the detector.
tol
Convergence threshold used to fit the detector. Used for both ``'sgd'`` and ``'bgd'`` optimizations.
            Defaults to ``1e-6``.
max_iter
The maximum number of optimization steps. Used for both ``'sgd'`` and ``'bgd'`` optimizations.
step_size_range
The range of values to be considered for the gradient descent step size at each iteration. This is specified
as a tuple of the form `(min_eta, max_eta)` and only used for the ``'bgd'`` optimization.
n_step_sizes
The number of step sizes in the defined range to be tested for loss reduction. This many points are spaced
evenly along the range in log space. This is only used for the ``'bgd'`` optimization.
n_iter_no_change
The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
            This is only used for the ``'bgd'`` optimization.
verbose
Verbosity level during training. ``0`` is silent, ``1`` prints fit status. If using `bgd`, fit displays a
progress bar. Otherwise, if using `sgd` then we output the Sklearn `SGDOneClassSVM.fit()` logs.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys depending on the optimization used:
- converged: `bool` indicating whether training converged.
- n_iter: number of iterations performed.
- lower_bound: loss lower bound. Only returned for the `bgd`.
"""
return self.backend.fit(
self.backend._to_backend_dtype(x_ref),
**self.backend.format_fit_kwargs(locals())
)
@catch_error('NotFittedError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
Scores the data using the fitted svm model. The higher the score, the more anomalous the instance.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the SVM detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
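# Typical workflow (illustrative; `x_ref` and `x_test` are user-supplied numpy arrays of
# shape `(n_instances, n_features)`):
#   svm = SVM(nu=0.01, n_components=100, optimization='sgd')
#   svm.fit(x_ref)
#   svm.infer_threshold(x_ref, fpr=0.01)
#   preds = svm.predict(x_test)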
| 10,687 | 41.923695 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/sr.py
|
import enum
import logging
import numpy as np
from typing import Dict
from alibi_detect.base import BaseDetector, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils._types import Literal
logger = logging.getLogger(__name__)
# numerical stability for division
EPSILON = 1e-8
class Padding(str, enum.Enum):
CONSTANT = 'constant'
REPLICATE = 'replicate'
REFLECT = 'reflect'
class Side(str, enum.Enum):
BILATERAL = 'bilateral'
LEFT = 'left'
RIGHT = 'right'
class SpectralResidual(BaseDetector, ThresholdMixin):
def __init__(self,
threshold: float = None,
window_amp: int = None,
window_local: int = None,
padding_amp_method: Literal['constant', 'replicate', 'reflect'] = 'reflect',
padding_local_method: Literal['constant', 'replicate', 'reflect'] = 'reflect',
padding_amp_side: Literal['bilateral', 'left', 'right'] = 'bilateral',
n_est_points: int = None,
n_grad_points: int = 5,
) -> None:
"""
Outlier detector for time-series data using the spectral residual algorithm.
Based on "Time-Series Anomaly Detection Service at Microsoft" (Ren et al., 2019)
https://arxiv.org/abs/1906.03821.
Parameters
----------
threshold
Threshold used to classify outliers. Relative saliency map distance from the moving average.
window_amp
Window for the average log amplitude.
window_local
Window for the local average of the saliency map. Note that the averaging is performed over the
previous `window_local` data points (i.e., is a local average of the preceding `window_local` points for
the current index).
padding_amp_method
Padding method to be used prior to each convolution over log amplitude.
            Possible values: `constant` | `replicate` | `reflect`. Default value: `reflect`.
- `constant` - padding with constant 0.
- `replicate` - repeats the last/extreme value.
- `reflect` - reflects the time series.
padding_local_method
Padding method to be used prior to each convolution over saliency map.
            Possible values: `constant` | `replicate` | `reflect`. Default value: `reflect`.
- `constant` - padding with constant 0.
- `replicate` - repeats the last/extreme value.
- `reflect` - reflects the time series.
padding_amp_side
Whether to pad the amplitudes on both sides or only on one side.
Possible values: `bilateral` | `left` | `right`.
n_est_points
Number of estimated points padded to the end of the sequence.
n_grad_points
Number of points used for the gradient estimation of the additional points padded
to the end of the sequence.
"""
super().__init__()
if threshold is None:
logger.warning("No threshold level set. Need to infer threshold using `infer_threshold`.")
self.threshold = threshold
self.window_amp = window_amp
self.window_local = window_local
self.conv_amp = np.ones((1, window_amp)).reshape(-1,) / window_amp
# conv_local needs a special treatment since the paper says that:
# \bar{S}(xi) is the local average of the preceding z points of S(xi).
        # To use the same padding implementation that includes the current point we are convolving, we define a modified
# filter given by: [0, 1, 1, 1, ... ,1] / window_local of size `window_local + 1`. In this way
# the current value is multiplied by 0 and thus neglected. Note that the 0 goes first since before the
# element-wise multiplication, the filter is flipped. We only do this since the filter is asymmetric.
self.conv_local = np.ones((1, window_local + 1)).reshape(-1,) / window_local
self.conv_local[0] = 0
self.n_est_points = n_est_points
self.n_grad_points = n_grad_points
self.padding_amp_method = padding_amp_method
self.padding_local_method = padding_local_method
self.padding_amp_side = padding_amp_side
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'time-series'
self.meta['online'] = True
def infer_threshold(self,
X: np.ndarray,
t: np.ndarray = None,
threshold_perc: float = 95.
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Uniformly sampled time series instances.
t
            Equidistant timestamps corresponding to each input instance (i.e., the array should contain
numerical values in increasing order). If not provided, the timestamps will be replaced by an array of
integers `[0, 1, ... , N - 1]`, where `N` is the size of the input time series.
threshold_perc
Percentage of `X` considered to be normal based on the outlier score.
"""
if t is None:
t = np.arange(X.shape[0])
# compute outlier scores
iscore = self.score(X, t)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
@staticmethod
def pad_same(X: np.ndarray,
W: np.ndarray,
method: str = 'replicate',
side: str = 'bilateral') -> np.ndarray:
"""
Adds padding to the time series `X` such that after applying a valid convolution with a kernel/filter
        `W`, the resulting time series has the same shape as the input `X`.
Parameters
----------
X
Time series to be padded
W
Convolution kernel/filter.
method
Padding method to be used.
Possible values:
- `constant` - padding with constant 0.
- `replicate` - repeats the last/extreme value.
- `reflect` - reflects the time series.
side
Whether to pad the time series bilateral or only on one side.
Possible values:
- `bilateral` - time series is padded on both sides.
- `left` - time series is padded only on the left hand side.
- `right` - time series is padded only on the right hand side.
Returns
-------
Padded time series.
"""
paddings = [p.value for p in Padding]
if method not in paddings:
raise ValueError(f"Unknown padding method. Received '{method}'. Select one of the following: {paddings}.")
sides = [s.value for s in Side]
if side not in sides:
raise ValueError(f"Unknown padding side. Received '{side}'. Select one of the following: {sides}.")
if len(X.shape) != 1:
raise ValueError(f"Only 1D time series supported. Received a times series with {len(X.shape)} dimensions.")
if len(W.shape) != 1:
raise ValueError("Only 1D kernel/filter supported. Received a kernel/filter "
f"with {len(W.shape)} dimensions.")
pad_size = W.shape[0] - 1
if side == Side.BILATERAL:
pad_size_right = pad_size // 2
pad_size_left = pad_size - pad_size_right
elif side == Side.LEFT:
pad_size_right = 0
pad_size_left = pad_size
else:
pad_size_right = pad_size
pad_size_left = 0
# replicate padding
if method == Padding.REPLICATE:
return np.concatenate([
np.tile(X[0], pad_size_left),
X,
np.tile(X[-1], pad_size_right)
])
# reflection padding
if method == Padding.REFLECT:
return np.concatenate([
X[1:pad_size_left + 1][::-1],
X,
X[-pad_size_right - 1: -1][::-1] if pad_size_right > 0 else np.array([])
])
# zero padding
return np.concatenate([
np.tile(0, pad_size_left),
X,
np.tile(0, pad_size_right)
])
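    # Worked example of `pad_same` (illustrative): for X = [1., 2., 3.] and a length-3 filter W,
    # pad_size = 2 is split 1/1 for 'bilateral', so 'reflect' padding yields [2., 1., 2., 3., 2.];
    # a subsequent 'valid' convolution with W then returns length 3, matching the original series.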
def saliency_map(self, X: np.ndarray) -> np.ndarray:
"""
Compute saliency map.
Parameters
----------
X
Uniformly sampled time series instances.
Returns
-------
Array with saliency map values.
"""
if X.shape[0] <= self.window_amp:
raise ValueError("The length of the input time series should be greater than the amplitude window. "
f"Received an input times series of length {X.shape[0]} and an amplitude "
f"window of {self.window_amp}.")
fft = np.fft.fft(X)
amp = np.abs(fft)
log_amp = np.log(amp)
phase = np.angle(fft)
# split spectrum into bias term and symmetric frequencies
bias, sym_freq = log_amp[:1], log_amp[1:]
# select just the first half of the sym_freq
freq = sym_freq[:(len(sym_freq) + 1) // 2]
# apply filter/moving average, but first pad the `freq` array
padded_freq = SpectralResidual.pad_same(X=freq,
W=self.conv_amp,
method=self.padding_amp_method,
side=self.padding_amp_side)
ma_freq = np.convolve(padded_freq, self.conv_amp, 'valid')
# construct moving average log amplitude spectrum
ma_log_amp = np.concatenate([
bias,
ma_freq,
(ma_freq[:-1] if len(sym_freq) % 2 == 1 else ma_freq)[::-1]
])
assert ma_log_amp.shape[0] == log_amp.shape[0], "`ma_log_amp` size does not match `log_amp` size."
# compute residual spectrum and transform back to time domain
res_amp = log_amp - ma_log_amp
sr = np.abs(np.fft.ifft(np.exp(res_amp + 1j * phase)))
return sr
def compute_grads(self, X: np.ndarray, t: np.ndarray) -> np.ndarray:
"""
Slope of the straight line between different points of the time series
multiplied by the average time step size.
Parameters
----------
X
Uniformly sampled time series instances.
t
            Equidistant timestamps corresponding to each input instance (i.e., the array should contain
numerical values in increasing order).
Returns
-------
Array with slope values.
"""
dX = X[-1] - X[-self.n_grad_points - 1:-1]
dt = t[-1] - t[-self.n_grad_points - 1:-1]
mean_grads = np.mean(dX / dt) * np.mean(dt)
return mean_grads
def add_est_points(self, X: np.ndarray, t: np.ndarray) -> np.ndarray:
"""
Pad the time series with additional points since the method works better if the anomaly point
is towards the center of the sliding window.
Parameters
----------
X
Uniformly sampled time series instances.
t
            Equidistant timestamps corresponding to each input instance (i.e., the array should contain
numerical values in increasing order).
Returns
-------
Padded version of X.
"""
grads = self.compute_grads(X, t)
X_add = X[-self.n_grad_points] + grads
X_pad = np.concatenate([X, np.tile(X_add, self.n_est_points)])
return X_pad
def score(self, X: np.ndarray, t: np.ndarray = None) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Uniformly sampled time series instances.
t
            Equidistant timestamps corresponding to each input instance (i.e., the array should contain
numerical values in increasing order). If not provided, the timestamps will be replaced by an array of
integers `[0, 1, ... , N - 1]`, where `N` is the size of the input time series.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
if t is None:
t = np.arange(X.shape[0])
if len(X.shape) == 2:
n_samples, n_dim = X.shape
X = X.reshape(-1, )
if X.shape[0] != n_samples:
raise ValueError("Only uni-variate time series allowed for SR method. Received a time "
f"series with {n_dim} features.")
X_pad = self.add_est_points(X, t) # add padding
sr = self.saliency_map(X_pad) # compute saliency map
sr = sr[:-self.n_est_points] # remove padding again
if X.shape[0] <= self.window_local:
raise ValueError("The length of the time series should be greater than the local window. "
f"Received an input time series of length {X.shape[0]} and a local "
f"window of {self.window_local}.")
# pad the spectral residual before convolving. By applying `replicate` or `reflect` padding we can
# remove some of the bias/outliers introduced at the beginning of the saliency map by a naive zero padding
# performed by numpy. The reason for left padding is explained in a comment in the constructor.
padded_sr = SpectralResidual.pad_same(X=sr,
W=self.conv_local,
method=self.padding_local_method,
side=Side.LEFT)
ma_sr = np.convolve(padded_sr, self.conv_local, 'valid')
assert sr.shape[0] == ma_sr.shape[0], "`ma_sr` size does not match `sr` size."
# compute the outlier score
iscore = (sr - ma_sr) / (ma_sr + EPSILON)
return iscore
def predict(self,
X: np.ndarray,
t: np.ndarray = None,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Uniformly sampled time series instances.
t
            Equidistant timestamps corresponding to each input instance (i.e., the array should contain
numerical values in increasing order). If not provided, the timestamps will be replaced by an array of
integers `[0, 1, ... , N - 1]`, where `N` is the size of the input time series.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing `meta` and `data` dictionaries.
- `meta` - has the model's metadata.
- `data` - contains the outlier predictions and instance level outlier scores.
"""
if t is None:
t = np.arange(X.shape[0])
# compute outlier scores
iscore = self.score(X, t)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
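# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Minimal example of scoring a univariate series with the detector above. The
# constructor arguments are assumptions inferred from the attributes referenced
# in the methods (`threshold`, `window_amp`, `window_local`, `n_est_points`);
# verify them against the class constructor before relying on them.
if __name__ == '__main__':
    import numpy as np  # repeated here so the sketch is self-contained
    t = np.arange(1000)
    X = np.sin(2 * np.pi * t / 50) + 0.1 * np.random.randn(1000)
    X[300] += 5.  # inject a point anomaly
    od = SpectralResidual(threshold=1., window_amp=20, window_local=20, n_est_points=20)
    preds = od.predict(X, t, return_instance_score=True)
    print(preds['data']['is_outlier'][295:305])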
| 15,789 | 37.512195 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_lof.py
|
from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import TransformProtocol, TransformProtocolType
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin
from alibi_detect.od.pytorch import LOFTorch, Ensembler
from alibi_detect.od.base import get_aggregator, get_normalizer, NormalizerLiterals, AggregatorLiterals
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
backends = {
'pytorch': (LOFTorch, Ensembler)
}
class LOF(BaseDetector, FitMixin, ThresholdMixin):
def __init__(
self,
k: Union[int, np.ndarray, List[int], Tuple[int]],
kernel: Optional[Callable] = None,
normalizer: Optional[Union[TransformProtocolType, NormalizerLiterals]] = 'PValNormalizer',
aggregator: Union[TransformProtocol, AggregatorLiterals] = 'AverageAggregator',
backend: Literal['pytorch'] = 'pytorch',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""
Local Outlier Factor (LOF) outlier detector.
The LOF detector is a non-parametric method for outlier detection. It computes the local density
deviation of a given data point with respect to its neighbors. It considers as outliers the
samples that have a substantially lower density than their neighbors.
The detector can be initialized with `k` a single value or an array of values. If `k` is a single value then
the score method uses the distance/kernel similarity to the k-th nearest neighbor. If `k` is an array of
values then the score method uses the distance/kernel similarity to each of the specified `k` neighbors.
In the latter case, an `aggregator` must be specified to aggregate the scores.
Note that, in the multiple k case, a normalizer can be provided. If a normalizer is passed then it is fit in
the `infer_threshold` method and so this method must be called before the `predict` method. If this is not
done an exception is raised. If `k` is a single value then the predict method can be called without first
calling `infer_threshold` but only scores will be returned and not outlier predictions.
Parameters
----------
k
Number of nearest neighbors to compute distance to. `k` can be a single value or
an array of integers. If an array is passed, an aggregator is required to aggregate
the scores. If `k` is a single value we compute the local outlier factor for that `k`.
Otherwise if `k` is a list then we compute and aggregate the local outlier factor for each
value in `k`.
kernel
Kernel function to use for outlier detection. If ``None``, `torch.cdist` is used.
Otherwise if a kernel is specified then instead of using `torch.cdist` the kernel
defines the k nearest neighbor distance.
normalizer
Normalizer to use for outlier detection. If ``None``, no normalization is applied.
For a list of available normalizers, see :mod:`alibi_detect.od.pytorch.ensemble`.
aggregator
Aggregator to use for outlier detection. Can be set to ``None`` if `k` is a single
value. For a list of available aggregators, see :mod:`alibi_detect.od.pytorch.ensemble`.
backend
Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
Raises
------
ValueError
If `k` is an array and `aggregator` is None.
NotImplementedError
If choice of `backend` is not implemented.
"""
super().__init__()
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
backend_cls, ensembler_cls = backends[backend]
ensembler = None
if aggregator is None and isinstance(k, (list, np.ndarray, tuple)):
raise ValueError('If `k` is a `np.ndarray`, `list` or `tuple`, '
'the `aggregator` argument cannot be ``None``.')
if isinstance(k, (list, np.ndarray, tuple)):
ensembler = ensembler_cls(
normalizer=get_normalizer(normalizer),
aggregator=get_aggregator(aggregator)
)
self.backend = backend_cls(k, kernel=kernel, ensembler=ensembler, device=device)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(self, x_ref: np.ndarray) -> None:
"""Fit the detector on reference data.
Parameters
----------
x_ref
Reference data used to fit the detector.
"""
self.backend.fit(self.backend._to_backend_dtype(x_ref))
@catch_error('NotFittedError')
@catch_error('ThresholdNotInferredError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
Computes the local outlier factor for each point in `x`. This is the density of each point `x`
relative to those of its neighbors in `x_ref`. If `k` is an array of values then the score for
each `k` is aggregated using the ensembler.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
ThresholdNotInferredError
If k is a list and a threshold was not inferred.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
score = self.backend._ensembler(score)
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the LOF detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
@catch_error('ThresholdNotInferredError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals. The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
ThresholdNotInferredError
If k is a list and a threshold was not inferred.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
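# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Shows the single-k and multi-k (aggregated) configurations described in the
# class docstring; the data and parameter values are purely illustrative.
if __name__ == '__main__':
    x_ref = np.random.randn(1000, 3).astype(np.float32)
    x_test = np.concatenate([np.random.randn(5, 3), 5. + np.random.randn(5, 3)]).astype(np.float32)
    # single k: scores can be computed without inferring a threshold first
    od = LOF(k=10)
    od.fit(x_ref)
    print(od.score(x_test))
    # multiple k: the normalizer is fit in `infer_threshold`, which therefore must precede `predict`
    od = LOF(k=[8, 16, 32], aggregator='AverageAggregator')
    od.fit(x_ref)
    od.infer_threshold(x_ref, fpr=0.01)
    print(od.predict(x_test)['data']['is_outlier'])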
| 9,133 | 40.899083 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/seq2seq.py
|
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Dict, Tuple, Union
from alibi_detect.models.tensorflow.autoencoder import Seq2Seq, EncoderLSTM, DecoderLSTM
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierSeq2Seq(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
n_features: int,
seq_len: int,
threshold: Union[float, np.ndarray] = None,
seq2seq: tf.keras.Model = None,
threshold_net: tf.keras.Model = None,
latent_dim: int = None,
output_activation: str = None,
beta: float = 1.
) -> None:
"""
Seq2Seq-based outlier detector.
Parameters
----------
n_features
Number of features in the time series.
seq_len
Sequence length fed into the Seq2Seq model.
threshold
Threshold used for outlier detection. Can be a float or feature-wise array.
seq2seq
A trained seq2seq model if available.
threshold_net
Layers for the threshold estimation network wrapped in a
tf.keras.Sequential class if no 'seq2seq' is specified.
latent_dim
Latent dimension of the encoder and decoder.
output_activation
Activation used in the Dense output layer of the decoder.
beta
Weight on the threshold estimation loss term.
"""
super().__init__()
if threshold is None:
threshold = 0.
logger.warning('No explicit threshold level set. Threshold defaults to 0. '
'A threshold can be inferred using `infer_threshold`.')
self.threshold = threshold
self.shape = (-1, seq_len, n_features)
self.latent_dim = (latent_dim // 2) * 2
if self.latent_dim != latent_dim:
            logger.warning('Odd values for `latent_dim` are not supported because of the '
                           'Bidirectional(LSTM(latent_dim // 2, ...)) layer in the encoder. '
                           f'{self.latent_dim} is used instead of {latent_dim}.')
self.output_activation = output_activation
if threshold_net is None and seq2seq is None: # default threshold network
threshold_net = tf.keras.Sequential(
[
InputLayer(input_shape=(seq_len, self.latent_dim)),
Dense(64, activation=tf.nn.relu),
Dense(64, activation=tf.nn.relu),
])
# check if model can be loaded, otherwise initialize a Seq2Seq model
if isinstance(seq2seq, tf.keras.Model):
self.seq2seq = seq2seq
elif isinstance(latent_dim, int) and isinstance(threshold_net, tf.keras.Sequential):
encoder_net = EncoderLSTM(self.latent_dim)
decoder_net = DecoderLSTM(self.latent_dim, n_features, output_activation)
self.seq2seq = Seq2Seq(encoder_net, decoder_net, threshold_net, n_features, beta=beta)
else:
raise TypeError('No valid format detected for `seq2seq` (tf.keras.Model), '
'`latent_dim` (int) or `threshold_net` (tf.keras.Sequential)')
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'time-series'
self.meta['online'] = False
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = tf.keras.losses.mse,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
) -> None:
"""
Train Seq2Seq model.
Parameters
----------
X
Univariate or multivariate time series.
Shape equals (batch, features) or (batch, sequence length, features).
loss_fn
Loss function used for training.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
# targets for teacher-forcing
if len(X.shape) == 2:
y = np.roll(X, -1, axis=0).reshape(self.shape)
X = X.reshape(self.shape)
else:
y = np.roll(X.reshape((-1, self.shape[-1])), -1, axis=0).reshape(self.shape)
# train arguments
args = [self.seq2seq, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {'y_train': y,
'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks}
# train
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
outlier_perc: Union[int, float] = 100.,
threshold_perc: Union[int, float, np.ndarray, list] = 95.,
batch_size: int = int(1e10)
) -> None:
"""
        Update the outlier threshold using a sequence of instances from the dataset for which the
        fraction of outlier features is known. This fraction can be specified across all features
        or per feature.
Parameters
----------
X
Univariate or multivariate time series.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
Overall (float) or feature-wise (array or list).
batch_size
Batch size used when making predictions with the seq2seq model.
"""
orig_shape = X.shape
threshold_shape = (1, orig_shape[-1])
if len(orig_shape) == 3: # (batch_size, seq_len, n_features)
threshold_shape = (1,) + threshold_shape # type: ignore
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_perc == 100.:
fscore = fscore.reshape((-1, self.shape[-1]))
# update threshold
if isinstance(threshold_perc, (int, float)) and outlier_perc == 100.:
self.threshold += np.percentile(fscore, threshold_perc)
elif isinstance(threshold_perc, (int, float)) and outlier_perc < 100.:
self.threshold += np.percentile(iscore, threshold_perc)
elif isinstance(threshold_perc, (list, np.ndarray)) and outlier_perc == 100.:
self.threshold += np.diag(np.percentile(fscore, threshold_perc, axis=0)).reshape(threshold_shape)
elif isinstance(threshold_perc, (list, np.ndarray)) and outlier_perc < 100.:
# number feature scores used for outlier score
n_score = int(np.ceil(.01 * outlier_perc * fscore.shape[1]))
# compute threshold level by feature
sorted_fscore = np.sort(fscore, axis=1)
if len(orig_shape) == 3: # (batch_size, seq_len, n_features)
sorted_fscore_perc = sorted_fscore[:, -n_score:, :] # (batch_size, n_score, n_features)
self.threshold += np.mean(sorted_fscore_perc, axis=(0, 1)).reshape(threshold_shape) # (1,1,n_features)
else: # (batch_size, n_features)
sorted_fscore_perc = sorted_fscore[:, -n_score:] # (batch_size, n_score)
self.threshold += np.mean(sorted_fscore_perc, axis=0) # float
else:
raise TypeError('Incorrect type for `threshold` and/or `threshold_perc`.')
def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray, threshold_est: np.ndarray) -> np.ndarray:
"""
Compute feature level outlier scores.
Parameters
----------
X_orig
Original time series.
X_recon
Reconstructed time series.
threshold_est
Estimated threshold from the decoder's latent space.
Returns
-------
Feature level outlier scores. Scores above 0 are outliers.
"""
fscore = (X_orig - X_recon) ** 2
# TODO: check casting if nb of features equals time dimension
fscore_adj = fscore - threshold_est - self.threshold
return fscore_adj
def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
"""
Compute instance level outlier scores. `instance` in this case means the data along the
first axis of the original time series passed to the predictor.
Parameters
----------
fscore
Feature level outlier scores.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
Returns
-------
Instance level outlier scores.
"""
fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
sorted_fscore = np.sort(fscore_flat, axis=1)
sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
iscore = np.mean(sorted_fscore_perc, axis=1)
return iscore
def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
-> Tuple[np.ndarray, np.ndarray]:
"""
Compute feature and instance level outlier scores.
Parameters
----------
X
Univariate or multivariate time series.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the seq2seq model.
Returns
-------
Feature and instance level outlier scores.
"""
# use the seq2seq model to reconstruct instances
orig_shape = X.shape
if len(orig_shape) == 2:
X = X.reshape(self.shape)
X_recon, threshold_est = predict_batch(X, self.seq2seq.decode_seq, batch_size=batch_size)
if len(orig_shape) == 2: # reshape back to original shape
X = X.reshape(orig_shape)
X_recon = X_recon.reshape(orig_shape)
threshold_est = threshold_est.reshape(orig_shape)
# compute feature and instance level scores
fscore = self.feature_score(X, X_recon, threshold_est)
iscore = self.instance_score(fscore, outlier_perc=outlier_perc)
return fscore, iscore
def predict(self,
X: np.ndarray,
outlier_type: str = 'instance',
outlier_perc: float = 100.,
batch_size: int = int(1e10),
return_feature_score: bool = True,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Univariate or multivariate time series.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the seq2seq model.
return_feature_score
Whether to return feature level outlier scores.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
"""
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# values above threshold are outliers
outlier_pred = (outlier_score > 0).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_feature_score:
od['data']['feature_score'] = fscore
if return_instance_score:
od['data']['instance_score'] = iscore
return od
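# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Trains the default Seq2Seq model on a synthetic univariate series; the number
# of epochs, latent dimension and threshold percentile are illustrative only.
if __name__ == '__main__':
    n_features, seq_len = 1, 50
    X = np.sin(np.linspace(0, 100, 5000)).reshape(-1, n_features).astype(np.float32)
    od = OutlierSeq2Seq(n_features, seq_len, latent_dim=8)
    od.fit(X, epochs=2, verbose=False)
    od.infer_threshold(X, threshold_perc=95)
    X_outlier = X.copy()
    X_outlier[100] += 4.  # perturb a single time step
    preds = od.predict(X_outlier, outlier_type='instance')
    print(preds['data']['is_outlier'].sum())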
| 13,595 | 40.075529 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_mahalanobis.py
|
from typing import Union, Optional, Dict, Any
from typing import TYPE_CHECKING
from alibi_detect.exceptions import _catch_error as catch_error
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.od.pytorch import MahalanobisTorch
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
backends = {
'pytorch': MahalanobisTorch
}
class Mahalanobis(BaseDetector, FitMixin, ThresholdMixin):
def __init__(
self,
min_eigenvalue: float = 1e-6,
backend: Literal['pytorch'] = 'pytorch',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""
The Mahalanobis outlier detection method.
The Mahalanobis detector computes the directions of variation of a dataset and uses them to detect when points
are outliers by checking to see if the points vary from dataset points in unexpected ways.
When we fit the Mahalanobis detector we compute the covariance matrix of the reference data and its eigenvectors
and eigenvalues. We filter small eigenvalues for numerical stability using the `min_eigenvalue` parameter. We
then inversely weight each eigenvector by its eigenvalue.
When we score test points we project them onto the eigenvectors and compute the l2-norm of the projected point.
Because the eigenvectors are inversely weighted by the eigenvalues, the score will take into account the
difference in variance along each direction of variation. If a test point lies along a direction of high
variation then it must lie very far out to obtain a high score. If a test point lies along a direction of low
variation then it doesn't need to lie very far out to obtain a high score.
Parameters
----------
min_eigenvalue
Eigenvectors with eigenvalues below this value will be discarded. This is to ensure numerical stability.
backend
Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
Raises
------
NotImplementedError
If choice of `backend` is not implemented.
"""
super().__init__()
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
backend_cls = backends[backend]
self.backend = backend_cls(min_eigenvalue, device=device)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(self, x_ref: np.ndarray) -> None:
"""Fit the detector on reference data.
Fitting the Mahalanobis detector amounts to computing the covariance matrix and its eigenvectors. We filter out
very small eigenvalues using the `min_eigenvalue` parameter. We then scale the eigenvectors such that the data
projected onto them has mean ``0`` and std ``1``.
Parameters
----------
x_ref
Reference data used to fit the detector.
"""
self.backend.fit(self.backend._to_backend_dtype(x_ref))
@catch_error('NotFittedError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
The mahalanobis method projects `x` onto the scaled eigenvectors computed during the fit step. The score is then
the l2-norm of the projected data. The higher the score, the more outlying the instance.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more outlying the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the Mahalanobis detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals. The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
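# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Fits the detector on correlated Gaussian reference data and flags points that
# deviate from the learned directions of variation; the values are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    x_ref = rng.multivariate_normal(mean=[0., 0.], cov=[[1., .8], [.8, 1.]], size=1000)
    x_test = np.array([[0., 0.], [2., -2.]])  # the second point violates the learned correlation
    od = Mahalanobis()
    od.fit(x_ref)
    od.infer_threshold(x_ref, fpr=0.01)
    print(od.predict(x_test)['data']['is_outlier'])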
| 6,935 | 37.966292 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/isolationforest.py
|
import logging
import numpy as np
from sklearn.ensemble import IsolationForest
from typing import Dict, Union
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
logger = logging.getLogger(__name__)
class IForest(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
n_estimators: int = 100,
max_samples: Union[str, int, float] = 'auto',
max_features: Union[int, float] = 1.,
bootstrap: bool = False,
n_jobs: int = 1,
data_type: str = 'tabular'
) -> None:
"""
Outlier detector for tabular data using isolation forests.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
n_estimators
Number of base estimators in the ensemble.
max_samples
Number of samples to draw from the training data to train each base estimator.
If int, draw 'max_samples' samples.
            If float, draw 'max_samples * number of samples' samples.
            If 'auto', max_samples = min(256, number of samples).
max_features
Number of features to draw from the training data to train each base estimator.
If int, draw 'max_features' features.
If float, draw 'max_features * number of features' features.
bootstrap
Whether to fit individual trees on random subsets of the training data, sampled with replacement.
n_jobs
Number of jobs to run in parallel for 'fit' and 'predict'.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.isolationforest = IsolationForest(n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
n_jobs=n_jobs)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
sample_weight: np.ndarray = None
) -> None:
"""
Fit isolation forest.
Parameters
----------
X
Training batch.
sample_weight
Sample weights.
"""
self.isolationforest.fit(X, sample_weight=sample_weight)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 95.
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
"""
# compute outlier scores
iscore = self.score(X)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
def score(self, X: np.ndarray) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Batch of instances to analyze.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
return - self.isolationforest.decision_function(X)
def predict(self,
X: np.ndarray,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Batch of instances.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and instance level outlier scores.
"""
# compute outlier scores
iscore = self.score(X)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
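# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Standard fit / infer_threshold / predict flow on synthetic tabular data; the
# number of estimators and the 99% threshold percentile are illustrative.
if __name__ == '__main__':
    X_train = np.random.randn(1000, 4)
    X_test = np.concatenate([np.random.randn(5, 4), 6. + np.random.randn(5, 4)])
    od = IForest(n_estimators=100)
    od.fit(X_train)
    od.infer_threshold(X_train, threshold_perc=99)
    preds = od.predict(X_test, return_instance_score=True)
    print(preds['data']['is_outlier'])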
| 5,012 | 32.871622 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/vae.py
|
import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import VAE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierVAE(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
score_type: str = 'mse', # TODO: reconstruction proba; make sure to infer correct distribution
vae: tf.keras.Model = None,
encoder_net: tf.keras.Model = None,
decoder_net: tf.keras.Model = None,
latent_dim: int = None,
samples: int = 10,
beta: float = 1.,
data_type: str = None
) -> None:
"""
VAE-based outlier detector.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
score_type
Metric used for outlier scores. Either 'mse' (mean squared error) or
            'proba' (reconstruction probabilities) is supported.
vae
A trained tf.keras model if available.
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class if no 'vae' is specified.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class if no 'vae' is specified.
latent_dim
Dimensionality of the latent space.
samples
Number of samples sampled to evaluate each instance.
beta
Beta parameter for KL-divergence loss term.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.score_type = score_type
self.samples = samples
# check if model can be loaded, otherwise initialize VAE model
if isinstance(vae, tf.keras.Model):
self.vae = vae
elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
self.vae = VAE(encoder_net, decoder_net, latent_dim, beta=beta) # define VAE model
else:
raise TypeError('No valid format detected for `vae` (tf.keras.Model) '
'or `encoder_net` and `decoder_net` (tf.keras.Sequential).')
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = elbo,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
cov_elbo: dict = dict(sim=.05),
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
) -> None:
"""
Train VAE model.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
optimizer
Optimizer used for training.
cov_elbo
Dictionary with covariance matrix options in case the elbo loss function is used.
Either use the full covariance matrix inferred from X (dict(cov_full=None)),
only the variance (dict(cov_diag=None)) or a float representing the same standard deviation
for each feature (e.g. dict(sim=.05)).
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
# train arguments
args = [self.vae, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks}
# initialize covariance matrix if elbo loss fn is used
use_elbo = loss_fn.__name__ == 'elbo'
cov_elbo_type, cov = [*cov_elbo][0], [*cov_elbo.values()][0]
if use_elbo and cov_elbo_type in ['cov_full', 'cov_diag']:
cov = tfp.stats.covariance(X.reshape(X.shape[0], -1))
if cov_elbo_type == 'cov_diag': # infer standard deviation from covariance matrix
cov = tf.math.sqrt(tf.linalg.diag_part(cov))
if use_elbo:
kwargs['loss_fn_kwargs'] = {cov_elbo_type: tf.dtypes.cast(cov, tf.float32)}
# train
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
outlier_type: str = 'instance',
outlier_perc: float = 100.,
threshold_perc: float = 95.,
batch_size: int = int(1e10)
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
batch_size
Batch size used when making predictions with the VAE.
"""
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# update threshold
self.threshold = np.percentile(outlier_score, threshold_perc)
def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray) -> np.ndarray:
"""
Compute feature level outlier scores.
Parameters
----------
X_orig
Batch of original instances.
X_recon
Batch of reconstructed instances.
Returns
-------
Feature level outlier scores.
"""
if self.score_type == 'mse':
fscore = np.power(X_orig - X_recon, 2)
fscore = fscore.reshape((-1, self.samples) + X_orig.shape[1:])
fscore = np.mean(fscore, axis=1)
elif self.score_type == 'proba':
            # reconstruction probabilities not implemented yet (see TODO in the constructor);
            # fail explicitly instead of falling through to an undefined `fscore`
            raise NotImplementedError('`score_type` "proba" is not implemented yet.')
return fscore
def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
"""
Compute instance level outlier scores.
Parameters
----------
fscore
Feature level outlier scores.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
Returns
-------
Instance level outlier scores.
"""
fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
sorted_fscore = np.sort(fscore_flat, axis=1)
sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
iscore = np.mean(sorted_fscore_perc, axis=1)
return iscore
def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
-> Tuple[np.ndarray, np.ndarray]:
"""
Compute feature and instance level outlier scores.
Parameters
----------
X
Batch of instances.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the VAE.
Returns
-------
Feature and instance level outlier scores.
"""
# sample reconstructed instances
X_samples = np.repeat(X, self.samples, axis=0)
X_recon = predict_batch(X_samples, self.vae, batch_size=batch_size)
# compute feature and instance level scores
fscore = self.feature_score(X_samples, X_recon) # type: ignore[arg-type]
iscore = self.instance_score(fscore, outlier_perc=outlier_perc)
return fscore, iscore
def predict(self,
X: np.ndarray,
outlier_type: str = 'instance',
outlier_perc: float = 100.,
batch_size: int = int(1e10),
return_feature_score: bool = True,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Predict whether instances are outliers or not.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the VAE.
return_feature_score
Whether to return feature level outlier scores.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
"""
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# values above threshold are outliers
outlier_pred = (outlier_score > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_feature_score:
od['data']['feature_score'] = fscore
if return_instance_score:
od['data']['instance_score'] = iscore
return od
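# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Small tabular example with toy encoder/decoder networks; the layer sizes,
# number of epochs and the 95% threshold percentile are illustrative assumptions.
if __name__ == '__main__':
    n_features, latent_dim = 8, 2
    X_train = np.random.randn(1000, n_features).astype(np.float32)
    encoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(n_features,)),
        tf.keras.layers.Dense(16, activation=tf.nn.relu)
    ])
    decoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
        tf.keras.layers.Dense(16, activation=tf.nn.relu),
        tf.keras.layers.Dense(n_features, activation=None)
    ])
    od = OutlierVAE(encoder_net=encoder_net, decoder_net=decoder_net, latent_dim=latent_dim, samples=5)
    od.fit(X_train, epochs=2, verbose=False)
    od.infer_threshold(X_train, threshold_perc=95)
    X_outlier = (10. + np.random.randn(5, n_features)).astype(np.float32)
    print(od.predict(X_outlier)['data']['is_outlier'])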
| 11,444 | 37.15 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_pca.py
|
from typing import Union, Optional, Callable, Dict, Any
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import KernelPCATorch, LinearPCATorch
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
from alibi_detect.exceptions import _catch_error as catch_error
if TYPE_CHECKING:
import torch
backends = {
'pytorch': (KernelPCATorch, LinearPCATorch)
}
class PCA(BaseDetector, ThresholdMixin, FitMixin):
def __init__(
self,
n_components: int,
kernel: Optional[Callable] = None,
backend: Literal['pytorch'] = 'pytorch',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""Principal Component Analysis (PCA) outlier detector.
The detector is based on the Principal Component Analysis (PCA) algorithm. There are two variants of PCA:
linear PCA and kernel PCA. Linear PCA computes the eigenvectors of the covariance matrix of the data. Kernel
PCA computes the eigenvectors of the kernel matrix of the data.
        When scoring a test instance using the linear variant we compute the distance to the principal subspace spanned
by the first `n_components` eigenvectors.
When scoring a test instance using the kernel variant we project it onto the largest eigenvectors and
compute its score using the L2 norm.
If a threshold is fitted we use this to determine whether the instance is an outlier or not.
Parameters
----------
        n_components
            The number of dimensions in the principal subspace. For linear PCA this should satisfy
            ``1 <= n_components < dim(data)``; for kernel PCA it should satisfy ``1 <= n_components < len(data)``.
kernel
Kernel function to use for outlier detection. If ``None``, linear PCA is used instead of the
kernel variant.
backend
Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
Raises
------
NotImplementedError
If choice of `backend` is not implemented.
ValueError
If `n_components` is less than 1.
"""
super().__init__()
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
kernel_backend_cls, linear_backend_cls = backends[backend]
self.backend: Union[KernelPCATorch, LinearPCATorch]
if kernel is not None:
self.backend = kernel_backend_cls(
n_components=n_components,
device=device,
kernel=kernel
)
else:
self.backend = linear_backend_cls(
n_components=n_components,
device=device,
)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(self, x_ref: np.ndarray) -> None:
"""Fit the detector on reference data.
In the linear case we compute the principal components of the reference data using the
covariance matrix and then remove the largest `n_components` eigenvectors. The remaining
eigenvectors correspond to the invariant dimensions of the data. Changes in these
dimensions are used to compute the outlier score which is the distance to the principal
subspace spanned by the first `n_components` eigenvectors.
In the kernel case we compute the principal components of the reference data using the
kernel matrix and then return the largest `n_components` eigenvectors. These are then
normalized to have length equal to `1/eigenvalue`. Note that this differs from the
linear case where we remove the largest eigenvectors.
In both cases we then store the computed components to use later when we score test
instances.
Parameters
----------
x_ref
Reference data used to fit the detector.
Raises
------
ValueError
If using linear pca variant and `n_components` is greater than or equal to number of
features or if using kernel pca variant and `n_components` is greater than or equal
to number of instances.
"""
self.backend.fit(self.backend._to_backend_dtype(x_ref))
@catch_error('NotFittedError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
Project `x` onto the eigenvectors and compute the score using the L2 norm.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the PCA detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals. The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
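# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Linear variant: keep a 2-dimensional principal subspace and score test points
# by their distance to it; the kernel variant only differs by passing `kernel`.
if __name__ == '__main__':
    x_ref = np.random.randn(1000, 5)
    x_test = np.concatenate([np.random.randn(5, 5), 5. + np.random.randn(5, 5)])
    od = PCA(n_components=2)
    od.fit(x_ref)
    od.infer_threshold(x_ref, fpr=0.01)
    print(od.predict(x_test)['data']['is_outlier'])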
| 8,089 | 37.160377 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_knn.py
|
from typing import Callable, Union, Optional, Dict, Any, List, Tuple
from typing import TYPE_CHECKING
from typing_extensions import Literal
import numpy as np
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.exceptions import _catch_error as catch_error
from alibi_detect.od.base import TransformProtocol, TransformProtocolType
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin
from alibi_detect.od.pytorch import KNNTorch, Ensembler
from alibi_detect.od.base import get_aggregator, get_normalizer, NormalizerLiterals, AggregatorLiterals
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
if TYPE_CHECKING:
import torch
backends = {
'pytorch': (KNNTorch, Ensembler)
}
class KNN(BaseDetector, FitMixin, ThresholdMixin):
def __init__(
self,
k: Union[int, np.ndarray, List[int], Tuple[int]],
kernel: Optional[Callable] = None,
normalizer: Optional[Union[TransformProtocolType, NormalizerLiterals]] = 'PValNormalizer',
aggregator: Union[TransformProtocol, AggregatorLiterals] = 'AverageAggregator',
backend: Literal['pytorch'] = 'pytorch',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""
k-Nearest Neighbors (kNN) outlier detector.
The kNN detector is a non-parametric method for outlier detection. The detector scores each instance
based on the distance to its neighbors. Instances with a large distance to their neighbors are more
likely to be outliers.
The detector can be initialized with `k` a single value or an array of values. If `k` is a single value then
the outlier score is the distance/kernel similarity to the k-th nearest neighbor. If `k` is an array of
values then the outlier score is the distance/kernel similarity to each of the specified `k` neighbors.
In the latter case, an `aggregator` must be specified to aggregate the scores.
Note that, in the multiple k case, a normalizer can be provided. If a normalizer is passed then it is fit in
the `infer_threshold` method and so this method must be called before the `predict` method. If this is not
done an exception is raised. If `k` is a single value then the predict method can be called without first
calling `infer_threshold` but only scores will be returned and not outlier predictions.
Parameters
----------
k
Number of nearest neighbors to compute distance to. `k` can be a single value or
an array of integers. If an array is passed, an aggregator is required to aggregate
the scores. If `k` is a single value the outlier score is the distance/kernel
similarity to the `k`-th nearest neighbor. If `k` is a list then it returns the
distance/kernel similarity to each of the specified `k` neighbors.
kernel
Kernel function to use for outlier detection. If ``None``, `torch.cdist` is used.
Otherwise if a kernel is specified then instead of using `torch.cdist` the kernel
defines the k nearest neighbor distance.
normalizer
Normalizer to use for outlier detection. If ``None``, no normalization is applied.
For a list of available normalizers, see :mod:`alibi_detect.od.pytorch.ensemble`.
aggregator
Aggregator to use for outlier detection. Can be set to ``None`` if `k` is a single
value. For a list of available aggregators, see :mod:`alibi_detect.od.pytorch.ensemble`.
backend
Backend used for outlier detection. Defaults to ``'pytorch'``. Options are ``'pytorch'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
Raises
------
ValueError
If `k` is an array and `aggregator` is None.
NotImplementedError
If choice of `backend` is not implemented.
"""
super().__init__()
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
backend_cls, ensembler_cls = backends[backend]
ensembler = None
if aggregator is None and isinstance(k, (list, np.ndarray, tuple)):
raise ValueError('If `k` is a `np.ndarray`, `list` or `tuple`, '
'the `aggregator` argument cannot be ``None``.')
if isinstance(k, (list, np.ndarray, tuple)):
ensembler = ensembler_cls(
normalizer=get_normalizer(normalizer),
aggregator=get_aggregator(aggregator)
)
self.backend = backend_cls(k, kernel=kernel, ensembler=ensembler, device=device)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(self, x_ref: np.ndarray) -> None:
"""Fit the detector on reference data.
Parameters
----------
x_ref
Reference data used to fit the detector.
"""
self.backend.fit(self.backend._to_backend_dtype(x_ref))
@catch_error('NotFittedError')
@catch_error('ThresholdNotInferredError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
Computes the k nearest neighbor distance/kernel similarity for each instance in `x`. If `k` is a single
value then this is the score otherwise if `k` is an array of values then the score is aggregated using
the ensembler.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
ThresholdNotInferredError
If k is a list and a threshold was not inferred.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
score = self.backend._ensembler(score)
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the kNN detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
@catch_error('ThresholdNotInferredError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
        performed, 'data' also contains the threshold value, outlier labels and p-vals. The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
ThresholdNotInferredError
If k is a list and a threshold was not inferred.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
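# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Mirrors the LOF example: a single k gives raw k-NN scores, while multiple k
# values are combined through the normalizer/aggregator ensemble.
if __name__ == '__main__':
    x_ref = np.random.randn(1000, 2).astype(np.float32)
    x_test = np.concatenate([np.random.randn(5, 2), 4. + np.random.randn(5, 2)]).astype(np.float32)
    # single k
    od = KNN(k=10)
    od.fit(x_ref)
    print(od.score(x_test))
    # multiple k values with score aggregation; `infer_threshold` fits the normalizer
    # and therefore must precede `predict`
    od = KNN(k=[8, 16, 32], normalizer='PValNormalizer', aggregator='AverageAggregator')
    od.fit(x_ref)
    od.infer_threshold(x_ref, fpr=0.01)
    print(od.predict(x_test)['data']['is_outlier'])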
| 9,117 | 40.634703 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/llr.py
|
from functools import partial
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow_probability.python.distributions.distribution import Distribution
from typing import Callable, Dict, Tuple, Union
from alibi_detect.models.tensorflow.pixelcnn import PixelCNN
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils.tensorflow.perturbation import mutate_categorical
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
def build_model(dist: Union[Distribution, PixelCNN], input_shape: tuple = None, filepath: str = None) \
-> Tuple[tf.keras.Model, Union[Distribution, PixelCNN]]:
"""
Create tf.keras.Model from TF distribution.
Parameters
----------
dist
TensorFlow distribution.
input_shape
Input shape of the model.
filepath
File to load model weights from.
Returns
-------
    Tuple of the TensorFlow model and the distribution.
"""
x_in = Input(shape=input_shape)
log_prob = dist.log_prob(x_in)
model = Model(inputs=x_in, outputs=log_prob)
model.add_loss(-tf.reduce_mean(log_prob))
if isinstance(filepath, str):
model.load_weights(filepath)
return model, dist
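# Usage sketch (added for illustration, not part of the original module).
# `build_model` turns any distribution exposing `log_prob` (e.g. the PixelCNN imported above)
# into a trainable tf.keras.Model whose loss is the negative mean log-likelihood. The call
# pattern below is a hedged sketch: `dist` and `X_train` are placeholders and the distribution's
# constructor arguments are deliberately omitted (see alibi_detect.models.tensorflow.pixelcnn).
#
#   model, dist = build_model(dist, input_shape=(28, 28, 1))
#   model.compile(optimizer=tf.keras.optimizers.Adam(1e-3))
#   model.fit(X_train, epochs=1)  # no targets needed: the loss is attached via `add_loss`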
class LLR(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
model: Union[tf.keras.Model, Distribution, PixelCNN] = None,
model_background: Union[tf.keras.Model, Distribution, PixelCNN] = None,
log_prob: Callable = None,
sequential: bool = False,
data_type: str = None
) -> None:
"""
Likelihood Ratios for Out-of-Distribution Detection. Ren, J. et al. NeurIPS 2019.
https://arxiv.org/abs/1906.02845
Parameters
----------
threshold
Threshold used for the likelihood ratio (LLR) to determine outliers.
model
Generative model, defaults to PixelCNN.
model_background
Optional model for the background. Only needed if it is different from `model`.
log_prob
Function used to evaluate log probabilities under the model
if the model does not have a `log_prob` function.
sequential
Whether the data is sequential. Used to create targets during training.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.has_log_prob = True if hasattr(model, 'log_prob') else False
self.sequential = sequential
self.log_prob = log_prob
self.threshold = threshold
# semantic model trained on original data
self.dist_s = model
# background model trained on perturbed data
if model_background is None:
try:
self.dist_b = model.copy()
except AttributeError:
self.dist_b = tf.keras.models.clone_model(model)
else:
self.dist_b = model_background
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
mutate_fn: Callable = mutate_categorical,
mutate_fn_kwargs: dict = {'rate': .2, 'seed': 0, 'feature_range': (0, 255)},
mutate_batch_size: int = int(1e10),
loss_fn: tf.keras.losses = None,
loss_fn_kwargs: dict = None,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None
) -> None:
"""
Train semantic and background generative models.
Parameters
----------
X
Training batch.
mutate_fn
Mutation function used to generate the background dataset.
mutate_fn_kwargs
Kwargs for the mutation function used to generate the background dataset.
Default values set for an image dataset.
mutate_batch_size
Batch size used to generate the mutations for the background dataset.
loss_fn
Loss function used for training.
loss_fn_kwargs
Kwargs for loss function.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
input_shape = X.shape[1:]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
        # create two separate optimizers, one for the semantic model and one for the background model
optimizer_s = optimizer
optimizer_b = optimizer.__class__.from_config(optimizer.get_config())
# training arguments
kwargs = {'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'callbacks': callbacks}
# create background data
mutate_fn = partial(mutate_fn, **mutate_fn_kwargs)
X_back = predict_batch(X, mutate_fn, batch_size=mutate_batch_size, dtype=X.dtype)
# prepare sequential data
if self.sequential and not self.has_log_prob:
y, y_back = X[:, 1:], X_back[:, 1:] # type: ignore
X, X_back = X[:, :-1], X_back[:, :-1] # type: ignore
else:
y, y_back = None, None
# check if model needs to be built
use_build = True if self.has_log_prob and not isinstance(self.dist_s, tf.keras.Model) else False
if use_build:
# build and train semantic model
self.model_s = build_model(self.dist_s, input_shape)[0]
self.model_s.compile(optimizer=optimizer_s)
self.model_s.fit(X, **kwargs)
# build and train background model
self.model_b = build_model(self.dist_b, input_shape)[0]
self.model_b.compile(optimizer=optimizer_b)
self.model_b.fit(X_back, **kwargs)
else:
# update training arguments
kwargs.update({
'loss_fn_kwargs': loss_fn_kwargs,
'log_metric': log_metric
})
# train semantic model
args = [self.dist_s, loss_fn, X]
kwargs.update({'y_train': y, 'optimizer': optimizer_s})
trainer(*args, **kwargs)
# train background model
args = [self.dist_b, loss_fn, X_back]
kwargs.update({'y_train': y_back, 'optimizer': optimizer_b})
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
outlier_type: str = 'instance',
threshold_perc: float = 95.,
batch_size: int = int(1e10)
) -> None:
"""
Update LLR threshold by a value inferred from the percentage of instances
considered to be outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
threshold_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size for the generative model evaluations.
"""
# compute outlier scores
fscore, iscore = self.score(X, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# update threshold
self.threshold = np.percentile(outlier_score, threshold_perc)
def logp(self, dist, X: np.ndarray, return_per_feature: bool = False, batch_size: int = int(1e10)) \
-> np.ndarray:
"""
Compute log probability of a batch of instances under the generative model.
Parameters
----------
dist
Distribution of the model.
X
Batch of instances.
return_per_feature
Return log probability per feature.
batch_size
Batch size for the generative model evaluations.
Returns
-------
Log probabilities.
"""
logp_fn = partial(dist.log_prob, return_per_feature=return_per_feature)
# TODO: TBD: can this be any of the other types from predict_batch? i.e. tf.Tensor or tuple
return predict_batch(X, logp_fn, batch_size=batch_size) # type: ignore[return-value]
def logp_alt(self, model: tf.keras.Model, X: np.ndarray, return_per_feature: bool = False,
batch_size: int = int(1e10)) -> np.ndarray:
"""
Compute log probability of a batch of instances using the log_prob function
defined by the user.
Parameters
----------
model
Trained model.
X
Batch of instances.
return_per_feature
Return log probability per feature.
batch_size
Batch size for the generative model evaluations.
Returns
-------
Log probabilities.
"""
if self.sequential:
y, X = X[:, 1:], X[:, :-1]
else:
y = X.copy()
y_preds = predict_batch(X, model, batch_size=batch_size)
logp = self.log_prob(y, y_preds).numpy()
if return_per_feature:
return logp
else:
axis = tuple(np.arange(len(logp.shape))[1:])
return np.mean(logp, axis=axis)
def llr(self, X: np.ndarray, return_per_feature: bool, batch_size: int = int(1e10)) -> np.ndarray:
"""
Compute likelihood ratios.
Parameters
----------
X
Batch of instances.
return_per_feature
Return likelihood ratio per feature.
batch_size
Batch size for the generative model evaluations.
Returns
-------
Likelihood ratios.
"""
logp_fn = self.logp if not isinstance(self.log_prob, Callable) else self.logp_alt # type: ignore
logp_s = logp_fn(self.dist_s, X, return_per_feature=return_per_feature, batch_size=batch_size)
logp_b = logp_fn(self.dist_b, X, return_per_feature=return_per_feature, batch_size=batch_size)
return logp_s - logp_b
def feature_score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
""" Feature-level negative likelihood ratios. """
return - self.llr(X, True, batch_size=batch_size)
def instance_score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
""" Instance-level negative likelihood ratios. """
return - self.llr(X, False, batch_size=batch_size)
def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> Tuple[np.ndarray, np.ndarray]:
"""
Feature-level and instance-level outlier scores.
The scores are equal to the negative likelihood ratios.
"""
fscore = self.feature_score(X, batch_size=batch_size)
iscore = self.instance_score(X, batch_size=batch_size)
return fscore, iscore
def predict(self,
X: np.ndarray,
outlier_type: str = 'instance',
batch_size: int = int(1e10),
return_feature_score: bool = True,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Predict whether instances are outliers or not.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
batch_size
Batch size used when making predictions with the generative model.
return_feature_score
Whether to return feature level outlier scores.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
"""
# compute outlier scores
fscore, iscore = self.score(X, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# values above threshold are outliers
outlier_pred = (outlier_score > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_feature_score:
od['data']['feature_score'] = fscore
if return_instance_score:
od['data']['instance_score'] = iscore
return od
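# Minimal usage sketch behind a __main__ guard so the module stays importable when run as a script.
# Illustrative only: the synthetic integer sequences (`x_ref`, `x_test`), the small LSTM model,
# the loss/likelihood functions and all hyperparameters below are placeholder assumptions,
# mirroring the pattern in the accompanying tests rather than a canonical recipe.
if __name__ == '__main__':  # pragma: no cover
    from tensorflow.keras.layers import Dense, Input, LSTM
    vocab_size, seq_len = 5, 10
    rng = np.random.default_rng(0)
    x_ref = rng.integers(0, vocab_size, size=(200, seq_len)).astype(np.int32)
    x_test = rng.integers(0, vocab_size, size=(20, seq_len)).astype(np.int32)
    # small autoregressive model returning per-step logits over the vocabulary
    inputs = Input(shape=(seq_len - 1,), dtype=tf.int32)
    x = tf.one_hot(tf.cast(inputs, tf.int32), vocab_size)
    x = LSTM(16, return_sequences=True)(x)
    logits = Dense(vocab_size, activation=None)(x)
    model = tf.keras.Model(inputs=inputs, outputs=logits)
    def loss_fn(y, x):
        # cross-entropy between next-step targets and predicted logits
        y = tf.one_hot(tf.cast(y, tf.int32), vocab_size)
        return tf.nn.softmax_cross_entropy_with_logits(y, x, axis=-1)
    od = LLR(sequential=True, model=model, log_prob=lambda y, x: -loss_fn(y, x))
    od.fit(x_ref, loss_fn=loss_fn,
           mutate_fn_kwargs={'rate': .2, 'feature_range': (0, vocab_size)},
           epochs=1, verbose=False)
    od.infer_threshold(x_ref, threshold_perc=95.)
    preds = od.predict(x_test)
    print(preds['data']['is_outlier'][:5])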
| 14,091 | 36.280423 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/prophet.py
|
from prophet import Prophet
import logging
import pandas as pd
from typing import Dict, List, Union
from alibi_detect.base import BaseDetector, FitMixin, outlier_prediction_dict
logger = logging.getLogger(__name__)
class OutlierProphet(BaseDetector, FitMixin):
def __init__(self,
threshold: float = .8,
growth: str = 'linear',
cap: float = None,
holidays: pd.DataFrame = None,
holidays_prior_scale: float = 10.,
country_holidays: str = None,
changepoint_prior_scale: float = .05,
changepoint_range: float = .8,
seasonality_mode: str = 'additive',
daily_seasonality: Union[str, bool, int] = 'auto',
weekly_seasonality: Union[str, bool, int] = 'auto',
yearly_seasonality: Union[str, bool, int] = 'auto',
add_seasonality: List = None,
seasonality_prior_scale: float = 10.,
uncertainty_samples: int = 1000,
mcmc_samples: int = 0
) -> None:
"""
Outlier detector for time series data using fbprophet.
See https://facebook.github.io/prophet/ for more details.
Parameters
----------
threshold
Width of the uncertainty intervals of the forecast, used as outlier threshold.
Equivalent to `interval_width`. If the instance lies outside of the uncertainty intervals,
it is flagged as an outlier. If `mcmc_samples` equals 0, it is the uncertainty in the trend
using the MAP estimate of the extrapolated model. If `mcmc_samples` >0, then uncertainty
over all parameters is used.
growth
'linear' or 'logistic' to specify a linear or logistic trend.
cap
Growth cap in case growth equals 'logistic'.
holidays
pandas DataFrame with columns `holiday` (string) and `ds` (dates) and optionally
columns `lower_window` and `upper_window` which specify a range of days around
the date to be included as holidays.
holidays_prior_scale
Parameter controlling the strength of the holiday components model.
            Higher values allow more flexible holiday effects, making the model more prone to overfitting.
country_holidays
Include country-specific holidays via country abbreviations.
The holidays for each country are provided by the holidays package in Python.
A list of available countries and the country name to use is available on:
https://github.com/dr-prodigy/python-holidays. Additionally, Prophet includes holidays for:
Brazil (BR), Indonesia (ID), India (IN), Malaysia (MY), Vietnam (VN), Thailand (TH),
            Philippines (PH), Turkey (TU), Pakistan (PK), Bangladesh (BD), Egypt (EG), China (CN) and Russia (RU).
changepoint_prior_scale
Parameter controlling the flexibility of the automatic changepoint selection.
Large values will allow many changepoints, potentially leading to overfitting.
changepoint_range
Proportion of history in which trend changepoints will be estimated.
            Higher values mean more changepoints, potentially leading to overfitting.
seasonality_mode
Either 'additive' or 'multiplicative'.
daily_seasonality
Can be 'auto', True, False, or a number of Fourier terms to generate.
weekly_seasonality
Can be 'auto', True, False, or a number of Fourier terms to generate.
yearly_seasonality
Can be 'auto', True, False, or a number of Fourier terms to generate.
add_seasonality
Manually add one or more seasonality components. Pass a list of dicts containing the keys
`name`, `period`, `fourier_order` (obligatory), `prior_scale` and `mode` (optional).
seasonality_prior_scale
Parameter controlling the strength of the seasonality model. Larger values allow the model to
fit larger seasonal fluctuations, potentially leading to overfitting.
uncertainty_samples
Number of simulated draws used to estimate uncertainty intervals.
mcmc_samples
If >0, will do full Bayesian inference with the specified number of MCMC samples.
If 0, will do MAP estimation.
"""
super().__init__()
# initialize Prophet model
# TODO: add conditional seasonalities
kwargs = {
'growth': growth,
'interval_width': threshold,
'holidays': holidays,
'holidays_prior_scale': holidays_prior_scale,
'changepoint_prior_scale': changepoint_prior_scale,
'changepoint_range': changepoint_range,
'seasonality_mode': seasonality_mode,
'daily_seasonality': daily_seasonality,
'weekly_seasonality': weekly_seasonality,
'yearly_seasonality': yearly_seasonality,
'seasonality_prior_scale': seasonality_prior_scale,
'uncertainty_samples': uncertainty_samples,
'mcmc_samples': mcmc_samples
}
self.model = Prophet(**kwargs)
if country_holidays:
self.model.add_country_holidays(country_name=country_holidays)
if add_seasonality:
for s in add_seasonality:
self.model.add_seasonality(**s)
self.cap = cap
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'time-series'
self.meta['online'] = False
def fit(self, df: pd.DataFrame) -> None:
"""
Fit Prophet model on normal (inlier) data.
Parameters
----------
df
Dataframe with columns `ds` with timestamps and `y` with target values.
"""
if self.cap:
df['cap'] = self.cap
self.model.fit(df)
def score(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Compute outlier scores.
Parameters
----------
df
DataFrame with columns `ds` with timestamps and `y` with values which
need to be flagged as outlier or not.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
if self.cap:
df['cap'] = self.cap
forecast = self.model.predict(df)
forecast['y'] = df['y'].values
forecast['score'] = (
(forecast['y'] - forecast['yhat_upper']) * (forecast['y'] >= forecast['yhat']) +
(forecast['yhat_lower'] - forecast['y']) * (forecast['y'] < forecast['yhat'])
)
return forecast
def predict(self,
df: pd.DataFrame,
return_instance_score: bool = True,
return_forecast: bool = True
) -> Dict[Dict[str, str], Dict[pd.DataFrame, pd.DataFrame]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
df
DataFrame with columns `ds` with timestamps and `y` with values which
need to be flagged as outlier or not.
return_instance_score
Whether to return instance level outlier scores.
return_forecast
Whether to return the model forecast.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions, instance level outlier scores and the model forecast.
"""
# compute outlier scores
forecast = self.score(df)
iscore = pd.DataFrame(data={
'ds': df['ds'].values,
'instance_score': forecast['score']
})
# values above threshold are outliers
outlier_pred = pd.DataFrame(data={
'ds': df['ds'].values,
'is_outlier': (forecast['score'] > 0.).astype(int)
})
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
if return_forecast:
od['data']['forecast'] = forecast
return od
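# Minimal usage sketch behind a __main__ guard; illustrative only. The synthetic daily series,
# the train/test split and the threshold value are placeholder assumptions, not a recommended
# configuration.
if __name__ == '__main__':  # pragma: no cover
    import numpy as np
    ds = pd.date_range('2020-01-01', periods=200, freq='D')
    y = np.sin(np.arange(200) / 10.) + np.random.normal(0., .1, 200)
    df = pd.DataFrame({'ds': ds, 'y': y})
    od = OutlierProphet(threshold=.9)
    od.fit(df.iloc[:150])
    preds = od.predict(df.iloc[150:], return_forecast=False)
    print(preds['data']['is_outlier'].head())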
| 8,498 | 41.283582 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/aegmm.py
|
import logging
import numpy as np
import tensorflow as tf
from typing import Callable, Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AEGMM, eucl_cosim_features
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
from alibi_detect.models.tensorflow.losses import loss_aegmm
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierAEGMM(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
aegmm: tf.keras.Model = None,
encoder_net: tf.keras.Model = None,
decoder_net: tf.keras.Model = None,
gmm_density_net: tf.keras.Model = None,
n_gmm: int = None,
recon_features: Callable = eucl_cosim_features,
data_type: str = None
) -> None:
"""
AEGMM-based outlier detector.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
aegmm
A trained tf.keras model if available.
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class if no 'aegmm' is specified.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class if no 'aegmm' is specified.
gmm_density_net
Layers for the GMM network wrapped in a tf.keras.Sequential class.
n_gmm
Number of components in GMM.
recon_features
Function to extract features from the reconstructed instance by the decoder.
data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
# check if model can be loaded, otherwise initialize AEGMM model
if isinstance(aegmm, tf.keras.Model):
self.aegmm = aegmm
elif (isinstance(encoder_net, tf.keras.Sequential) and
isinstance(decoder_net, tf.keras.Sequential) and
isinstance(gmm_density_net, tf.keras.Sequential)):
self.aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm, recon_features)
else:
raise TypeError('No valid format detected for `aegmm` (tf.keras.Model) '
'or `encoder_net`, `decoder_net` and `gmm_density_net` (tf.keras.Sequential).')
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
self.phi, self.mu, self.cov, self.L, self.log_det_cov = None, None, None, None, None
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = loss_aegmm,
w_energy: float = .1,
w_cov_diag: float = .005,
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
) -> None:
"""
Train AEGMM model.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
w_energy
Weight on sample energy loss term if default `loss_aegmm` loss fn is used.
w_cov_diag
Weight on covariance regularizing loss term if default `loss_aegmm` loss fn is used.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
# train arguments
args = [self.aegmm, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks,
'loss_fn_kwargs': {'w_energy': w_energy,
'w_cov_diag': w_cov_diag}
}
# train
trainer(*args, **kwargs)
# set GMM parameters
x_recon, z, gamma = self.aegmm(X)
self.phi, self.mu, self.cov, self.L, self.log_det_cov = gmm_params(z, gamma)
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 95.,
batch_size: int = int(1e10)
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
batch_size
Batch size used when making predictions with the AEGMM.
"""
# compute outlier scores
iscore = self.score(X, batch_size=batch_size)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
def score(self, X: np.ndarray, batch_size: int = int(1e10)) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Batch of instances to analyze.
batch_size
Batch size used when making predictions with the AEGMM.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
_, z, _ = predict_batch(X, self.aegmm, batch_size=batch_size)
energy, _ = gmm_energy(z, self.phi, self.mu, self.cov, self.L, self.log_det_cov, return_mean=False)
return energy.numpy()
def predict(self,
X: np.ndarray,
batch_size: int = int(1e10),
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Batch of instances.
batch_size
Batch size used when making predictions with the AEGMM.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and instance level outlier scores.
"""
# compute outlier scores
iscore = self.score(X, batch_size=batch_size)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
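# Minimal usage sketch behind a __main__ guard; illustrative only. The random reference data,
# network sizes, number of mixture components and training settings are placeholder assumptions.
# Note that the GMM density net takes the latent dimension plus the two reconstruction features
# produced by the default `eucl_cosim_features`.
if __name__ == '__main__':  # pragma: no cover
    from tensorflow.keras.layers import Dense, InputLayer
    n, input_dim, latent_dim, n_gmm = 1000, 32, 2, 2
    x_ref = np.random.rand(n, input_dim).astype(np.float32)
    encoder_net = tf.keras.Sequential([
        InputLayer(input_shape=(input_dim,)),
        Dense(64, activation=tf.nn.relu),
        Dense(latent_dim, activation=None)])
    decoder_net = tf.keras.Sequential([
        InputLayer(input_shape=(latent_dim,)),
        Dense(64, activation=tf.nn.relu),
        Dense(input_dim, activation=tf.nn.sigmoid)])
    gmm_density_net = tf.keras.Sequential([
        InputLayer(input_shape=(latent_dim + 2,)),
        Dense(10, activation=tf.nn.relu),
        Dense(n_gmm, activation=tf.nn.softmax)])
    od = OutlierAEGMM(encoder_net=encoder_net, decoder_net=decoder_net,
                      gmm_density_net=gmm_density_net, n_gmm=n_gmm)
    od.fit(x_ref, epochs=1, batch_size=256, verbose=False)
    od.infer_threshold(x_ref, threshold_perc=95.)
    print(od.predict(x_ref[:5])['data']['is_outlier'])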
| 7,829 | 35.933962 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/ae.py
|
import logging
import numpy as np
import tensorflow as tf
from typing import Dict, Tuple
from alibi_detect.models.tensorflow.autoencoder import AE
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
from alibi_detect.utils.tensorflow.prediction import predict_batch
from alibi_detect.utils._types import OptimizerTF
logger = logging.getLogger(__name__)
class OutlierAE(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
ae: tf.keras.Model = None,
encoder_net: tf.keras.Model = None,
decoder_net: tf.keras.Model = None,
data_type: str = None
) -> None:
"""
AE-based outlier detector.
Parameters
----------
threshold
Threshold used for outlier score to determine outliers.
ae
A trained tf.keras model if available.
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class if no 'ae' is specified.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
# check if model can be loaded, otherwise initialize AE model
if isinstance(ae, tf.keras.Model):
self.ae = ae
elif isinstance(encoder_net, tf.keras.Sequential) and isinstance(decoder_net, tf.keras.Sequential):
self.ae = AE(encoder_net, decoder_net)
else:
raise TypeError('No valid format detected for `ae` (tf.keras.Model) '
'or `encoder_net`, `decoder_net` (tf.keras.Sequential).')
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = False
def fit(self,
X: np.ndarray,
loss_fn: tf.keras.losses = tf.keras.losses.MeanSquaredError(),
optimizer: OptimizerTF = tf.keras.optimizers.Adam,
epochs: int = 20,
batch_size: int = 64,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None,
) -> None:
"""
Train AE model.
Parameters
----------
X
Training batch.
loss_fn
Loss function used for training.
optimizer
Optimizer used for training.
epochs
Number of training epochs.
batch_size
Batch size used for training.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
# train arguments
args = [self.ae, loss_fn, X]
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
kwargs = {'optimizer': optimizer,
'epochs': epochs,
'batch_size': batch_size,
'verbose': verbose,
'log_metric': log_metric,
'callbacks': callbacks}
# train
trainer(*args, **kwargs)
def infer_threshold(self,
X: np.ndarray,
outlier_type: str = 'instance',
outlier_perc: float = 100.,
threshold_perc: float = 95.,
batch_size: int = int(1e10)
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
batch_size
Batch size used when making predictions with the autoencoder.
"""
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# update threshold
self.threshold = np.percentile(outlier_score, threshold_perc)
def feature_score(self, X_orig: np.ndarray, X_recon: np.ndarray) -> np.ndarray:
"""
Compute feature level outlier scores.
Parameters
----------
X_orig
Batch of original instances.
X_recon
Batch of reconstructed instances.
Returns
-------
Feature level outlier scores.
"""
fscore = np.power(X_orig - X_recon, 2)
return fscore
def instance_score(self, fscore: np.ndarray, outlier_perc: float = 100.) -> np.ndarray:
"""
Compute instance level outlier scores.
Parameters
----------
fscore
Feature level outlier scores.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
Returns
-------
Instance level outlier scores.
"""
fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
n_score_features = int(np.ceil(.01 * outlier_perc * fscore_flat.shape[1]))
sorted_fscore = np.sort(fscore_flat, axis=1)
sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
iscore = np.mean(sorted_fscore_perc, axis=1)
return iscore
def score(self, X: np.ndarray, outlier_perc: float = 100., batch_size: int = int(1e10)) \
-> Tuple[np.ndarray, np.ndarray]:
"""
Compute feature and instance level outlier scores.
Parameters
----------
X
Batch of instances.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the autoencoder.
Returns
-------
Feature and instance level outlier scores.
"""
# reconstruct instances
X_recon = predict_batch(X, self.ae, batch_size=batch_size)
# compute feature and instance level scores
fscore = self.feature_score(X, X_recon) # type: ignore[arg-type]
iscore = self.instance_score(fscore, outlier_perc=outlier_perc)
return fscore, iscore
def predict(self,
X: np.ndarray,
outlier_type: str = 'instance',
outlier_perc: float = 100.,
batch_size: int = int(1e10),
return_feature_score: bool = True,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Predict whether instances are outliers or not.
Parameters
----------
X
Batch of instances.
outlier_type
Predict outliers at the 'feature' or 'instance' level.
outlier_perc
Percentage of sorted feature level outlier scores used to predict instance level outlier.
batch_size
Batch size used when making predictions with the autoencoder.
return_feature_score
Whether to return feature level outlier scores.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and both feature and instance level outlier scores.
"""
# compute outlier scores
fscore, iscore = self.score(X, outlier_perc=outlier_perc, batch_size=batch_size)
if outlier_type == 'feature':
outlier_score = fscore
elif outlier_type == 'instance':
outlier_score = iscore
else:
            raise ValueError('`outlier_type` needs to be either `feature` or `instance`.')
# values above threshold are outliers
outlier_pred = (outlier_score > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_feature_score:
od['data']['feature_score'] = fscore
if return_instance_score:
od['data']['instance_score'] = iscore
return od
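# Minimal usage sketch behind a __main__ guard; illustrative only. The random data, network
# sizes and training settings are placeholder assumptions.
if __name__ == '__main__':  # pragma: no cover
    from tensorflow.keras.layers import Dense, InputLayer
    n, input_dim, encoding_dim = 500, 10, 2
    x_ref = np.random.rand(n, input_dim).astype(np.float32)
    encoder_net = tf.keras.Sequential([
        InputLayer(input_shape=(input_dim,)),
        Dense(8, activation=tf.nn.relu),
        Dense(encoding_dim, activation=None)])
    decoder_net = tf.keras.Sequential([
        InputLayer(input_shape=(encoding_dim,)),
        Dense(8, activation=tf.nn.relu),
        Dense(input_dim, activation=tf.nn.sigmoid)])
    od = OutlierAE(encoder_net=encoder_net, decoder_net=decoder_net)
    od.fit(x_ref, epochs=3, verbose=False)
    od.infer_threshold(x_ref, outlier_type='instance', threshold_perc=95.)
    preds = od.predict(x_ref[:5], outlier_type='instance', outlier_perc=100.)
    print(preds['data']['is_outlier'])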
| 9,396 | 35.003831 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/_gmm.py
|
from typing import Union, Optional, Dict, Any, TYPE_CHECKING
import numpy as np
from alibi_detect.utils._types import Literal
from alibi_detect.base import outlier_prediction_dict
from alibi_detect.base import BaseDetector, ThresholdMixin, FitMixin
from alibi_detect.od.pytorch import GMMTorch
from alibi_detect.od.sklearn import GMMSklearn
from alibi_detect.utils.frameworks import BackendValidator
from alibi_detect.version import __version__
from alibi_detect.exceptions import _catch_error as catch_error
if TYPE_CHECKING:
import torch
backends = {
'pytorch': GMMTorch,
'sklearn': GMMSklearn
}
class GMM(BaseDetector, ThresholdMixin, FitMixin):
def __init__(
self,
n_components: int = 1,
backend: Literal['pytorch', 'sklearn'] = 'sklearn',
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
) -> None:
"""Gaussian Mixture Model (GMM) outlier detector.
The gaussian mixture model outlier detector fits a mixture of gaussian distributions to the reference data.
Test points are scored via the negative log-likelihood under the corresponding density function.
We support two backends: ``'pytorch'`` and ``'sklearn'``. The ``'pytorch'`` backend allows for GPU acceleration
and uses gradient descent to fit the GMM. We recommend using the ``'pytorch'`` backend for large datasets. The
``'sklearn'`` backend is a pure python implementation and is recommended for smaller datasets.
Parameters
----------
n_components:
The number of mixture components. Defaults to ``1``.
backend
Backend used for outlier detection. Defaults to ``'sklearn'``. Options are ``'pytorch'`` and ``'sklearn'``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'`` or ``'cpu'``. The device is only used if the ``'pytorch'`` backend is
used. Defaults to ``None``.
Raises
------
NotImplementedError
If choice of `backend` is not implemented.
"""
super().__init__()
backend_str: str = backend.lower()
BackendValidator(
backend_options={'pytorch': ['pytorch'], 'sklearn': ['sklearn']},
construct_name=self.__class__.__name__
).verify_backend(backend_str)
backend_cls = backends[backend]
args: Dict[str, Any] = {'n_components': n_components}
if backend == 'pytorch':
args['device'] = device
self.backend = backend_cls(**args)
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = 'numeric'
self.meta['online'] = False
def fit(
self,
x_ref: np.ndarray,
optimizer: Optional[str] = 'Adam',
learning_rate: float = 0.1,
max_epochs: Optional[int] = None,
batch_size: Optional[int] = None,
tol: float = 1e-3,
n_iter_no_change: int = 25,
n_init: int = 1,
init_params: str = 'kmeans',
verbose: int = 0,
) -> None:
"""Fit the detector on reference data.
If the ``'pytorch'`` backend is used, the detector is fitted using gradient descent. This is the recommended
backend for larger datasets.
If the ``'sklearn'`` backend is used, the detector is fitted using the EM algorithm. The ``'sklearn'``
backend is recommended for smaller datasets. For more information on the EM algorithm and the sklearn Gaussian
Mixture Model, see `here <https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture>`_. # noqa: E501
Parameters
----------
x_ref
Reference data used to fit the detector.
optimizer
Optimizer used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``'Adam'``.
learning_rate
Learning rate used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``0.1``.
max_epochs
Maximum number of training epochs used to fit the detector. Used for both the ``'pytorch'`` and ``'sklearn'``
backends. If the backend is ``'sklearn'``, the detector is fit using the EM algorithm and `max_epochs`
defaults to ``100``. If the backend is ``'pytorch'``, the detector is fitted using gradient descent and
`max_epochs` defaults to ``10``.
batch_size
Batch size used to fit the detector. Only used if the ``'pytorch'`` backend is used. Defaults to ``None``.
If ``None``, the entire dataset is used for each gradient update.
tol
Convergence threshold used to fit the detector. Used for both ``'sklearn'`` and ``'pytorch'`` backends.
Defaults to ``1e-3``.
n_iter_no_change
The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
Only used if the ``'pytorch'`` backend is used.
n_init
Number of initializations used to fit the detector. Only used if the ``'sklearn'`` backend is used.
Defaults to ``1``.
init_params
Initialization method used to fit the detector. Only used if the ``'sklearn'`` backend is used. Must be
one of:
'kmeans' : responsibilities are initialized using kmeans.
'kmeans++' : responsibilities are initialized using kmeans++.
'random' : responsibilities are initialized randomly.
'random_from_data' : responsibilities are initialized randomly from the data.
Defaults to ``'kmeans'``.
verbose
Verbosity level used to fit the detector. Used for both ``'sklearn'`` and ``'pytorch'`` backends. Defaults to ``0``.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys depending on the backend used:
- converged: bool indicating whether EM algorithm converged.
- n_iter: number of EM iterations performed. Only returned if `backend` is ``'sklearn'``.
- n_epochs: number of gradient descent iterations performed. Only returned if `backend` is ``'pytorch'``.
- lower_bound: log-likelihood lower bound.
"""
return self.backend.fit(
self.backend._to_backend_dtype(x_ref),
**self.backend.format_fit_kwargs(locals())
)
@catch_error('NotFittedError')
def score(self, x: np.ndarray) -> np.ndarray:
"""Score `x` instances using the detector.
To score an instance, we compute the negative log-likelihood under the corresponding density function of
the fitted gaussian mixture model.
Parameters
----------
x
Data to score. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Outlier scores. The shape of the scores is `(n_instances,)`. The higher the score, the more anomalous the \
instance.
Raises
------
NotFittedError
If called before detector has been fit.
"""
score = self.backend.score(self.backend._to_backend_dtype(x))
return self.backend._to_frontend_dtype(score)
@catch_error('NotFittedError')
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the GMM detector.
The threshold is computed so that the outlier detector would incorrectly classify `fpr` proportion of the
reference data as outliers.
Parameters
----------
x
Reference data used to infer the threshold.
fpr
False positive rate used to infer the threshold. The false positive rate is the proportion of
instances in `x` that are incorrectly classified as outliers. The false positive rate should
be in the range ``(0, 1)``.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
NotFittedError
If called before detector has been fit.
"""
self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)
@catch_error('NotFittedError')
def predict(self, x: np.ndarray) -> Dict[str, Any]:
"""Predict whether the instances in `x` are outliers or not.
Scores the instances in `x` and if the threshold was inferred, returns the outlier labels and p-values as well.
Parameters
----------
x
Data to predict. The shape of `x` should be `(n_instances, n_features)`.
Returns
-------
Dictionary with keys 'data' and 'meta'. 'data' contains the outlier scores. If threshold inference was \
performed, 'data' also contains the threshold value, outlier labels and p-vals . The shape of the scores is \
`(n_instances,)`. The higher the score, the more anomalous the instance. 'meta' contains information about \
the detector.
Raises
------
NotFittedError
If called before detector has been fit.
"""
outputs = self.backend.predict(self.backend._to_backend_dtype(x))
output = outlier_prediction_dict()
output['data'] = {
**output['data'],
**self.backend._to_frontend_dtype(outputs)
}
output['meta'] = {
**output['meta'],
'name': self.__class__.__name__,
'detector_type': 'outlier',
'online': False,
'version': __version__,
}
return output
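# Minimal usage sketch behind a __main__ guard; illustrative only. The random reference/test
# data, the number of components and the target false positive rate are placeholder assumptions.
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.default_rng(0)
    x_ref = rng.standard_normal((500, 4))
    # second half of the test batch is shifted away from the reference distribution
    x_test = np.concatenate([rng.standard_normal((5, 4)),
                             5. + rng.standard_normal((5, 4))])
    od = GMM(n_components=2, backend='sklearn')
    od.fit(x_ref)
    od.infer_threshold(x_ref, fpr=0.05)
    print(od.predict(x_test)['data']['is_outlier'])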
| 9,837 | 41.042735 | 170 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
from .isolationforest import IForest
from .mahalanobis import Mahalanobis
from .sr import SpectralResidual
OutlierAEGMM = import_optional('alibi_detect.od.aegmm', names=['OutlierAEGMM'])
OutlierAE = import_optional('alibi_detect.od.ae', names=['OutlierAE'])
OutlierVAE = import_optional('alibi_detect.od.vae', names=['OutlierVAE'])
OutlierVAEGMM = import_optional('alibi_detect.od.vaegmm', names=['OutlierVAEGMM'])
OutlierSeq2Seq = import_optional('alibi_detect.od.seq2seq', names=['OutlierSeq2Seq'])
LLR = import_optional('alibi_detect.od.llr', names=['LLR'])
OutlierProphet = import_optional('alibi_detect.od.prophet', names=['OutlierProphet'])
__all__ = [
"OutlierAEGMM",
"IForest",
"Mahalanobis",
"OutlierAE",
"OutlierVAE",
"OutlierVAEGMM",
"OutlierSeq2Seq",
"SpectralResidual",
"LLR",
"OutlierProphet",
]
| 929 | 32.214286 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/mahalanobis.py
|
import logging
import numpy as np
from scipy.linalg import eigh
from typing import Dict, Optional
from alibi_detect.utils.discretizer import Discretizer
from alibi_detect.utils.distance import abdm, mvdm, multidim_scaling
from alibi_detect.utils.mapping import ohe2ord, ord2num
from alibi_detect.base import BaseDetector, FitMixin, ThresholdMixin, outlier_prediction_dict
logger = logging.getLogger(__name__)
EPSILON = 1e-8
class Mahalanobis(BaseDetector, FitMixin, ThresholdMixin):
def __init__(self,
threshold: float = None,
n_components: int = 3,
std_clip: int = 3,
start_clip: int = 100,
max_n: int = None,
cat_vars: dict = None,
ohe: bool = False,
data_type: str = 'tabular'
) -> None:
"""
Outlier detector for tabular data using the Mahalanobis distance.
Parameters
----------
threshold
Mahalanobis distance threshold used to classify outliers.
n_components
Number of principal components used.
std_clip
Feature-wise stdev used to clip the observations before updating the mean and cov.
start_clip
Number of observations before clipping is applied.
max_n
Algorithm behaves as if it has seen at most max_n points.
cat_vars
Dict with as keys the categorical columns and as values
the number of categories per categorical variable.
ohe
Whether the categorical variables are one-hot encoded (OHE) or not. If not OHE, they are
assumed to have ordinal encodings.
data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if threshold is None:
logger.warning('No threshold level set. Need to infer threshold using `infer_threshold`.')
self.threshold = threshold
self.n_components = n_components
self.std_clip = std_clip
self.start_clip = start_clip
self.max_n = max_n
# variables used in mapping from categorical to numerical values
# keys = categorical columns; values = numerical value for each of the categories
self.cat_vars = cat_vars
self.ohe = ohe
self.d_abs: Dict = {}
# initial parameter values
self.clip: Optional[list] = None
self.mean = 0
self.C = 0
self.n = 0
# set metadata
self.meta['detector_type'] = 'outlier'
self.meta['data_type'] = data_type
self.meta['online'] = True
def fit(self,
X: np.ndarray,
y: np.ndarray = None,
d_type: str = 'abdm',
w: float = None,
disc_perc: list = [25, 50, 75],
standardize_cat_vars: bool = True,
feature_range: tuple = (-1e10, 1e10),
smooth: float = 1.,
center: bool = True
) -> None:
"""
If categorical variables are present, then transform those to numerical values.
This step is not necessary in the absence of categorical variables.
Parameters
----------
X
Batch of instances used to infer distances between categories from.
y
Model class predictions or ground truth labels for X.
Used for 'mvdm' and 'abdm-mvdm' pairwise distance metrics.
Note that this is only compatible with classification problems. For regression problems,
use the 'abdm' distance metric.
d_type
Pairwise distance metric used for categorical variables. Currently, 'abdm', 'mvdm' and 'abdm-mvdm'
are supported. 'abdm' infers context from the other variables while 'mvdm' uses the model predictions.
'abdm-mvdm' is a weighted combination of the two metrics.
w
Weight on 'abdm' (between 0. and 1.) distance if d_type equals 'abdm-mvdm'.
disc_perc
List with percentiles used in binning of numerical features used for the 'abdm'
and 'abdm-mvdm' pairwise distance measures.
standardize_cat_vars
Standardize numerical values of categorical variables if True.
feature_range
Tuple with min and max ranges to allow for perturbed instances. Min and max ranges can be floats or
numpy arrays with dimension (1x nb of features) for feature-wise ranges.
smooth
            Smoothing exponent between 0 and 1 for the distances. Lower values will smooth out the difference in
            distance metric between different features.
center
Whether to center the scaled distance measures. If False, the min distance for each feature
except for the feature with the highest raw max distance will be the lower bound of the
feature range, but the upper bound will be below the max feature range.
"""
if self.cat_vars is None:
raise TypeError('No categorical variables specified in the "cat_vars" argument.')
if d_type not in ['abdm', 'mvdm', 'abdm-mvdm']:
raise ValueError('d_type needs to be "abdm", "mvdm" or "abdm-mvdm". '
'{} is not supported.'.format(d_type))
if self.ohe:
X_ord, cat_vars_ord = ohe2ord(X, self.cat_vars)
else:
X_ord, cat_vars_ord = X, self.cat_vars
# bin numerical features to compute the pairwise distance matrices
cat_keys = list(cat_vars_ord.keys())
n_ord = X_ord.shape[1]
if d_type in ['abdm', 'abdm-mvdm'] and len(cat_keys) != n_ord:
fnames = [str(_) for _ in range(n_ord)]
disc = Discretizer(X_ord, cat_keys, fnames, percentiles=disc_perc)
X_bin = disc.discretize(X_ord)
cat_vars_bin = {k: len(disc.names[k]) for k in range(n_ord) if k not in cat_keys}
else:
X_bin = X_ord
cat_vars_bin = {}
# pairwise distances for categorical variables
if d_type == 'abdm':
d_pair = abdm(X_bin, cat_vars_ord, cat_vars_bin)
elif d_type == 'mvdm':
d_pair = mvdm(X_ord, y, cat_vars_ord, alpha=1)
if (type(feature_range[0]) == type(feature_range[1]) and # noqa
type(feature_range[0]) in [int, float]):
feature_range = (np.ones((1, n_ord)) * feature_range[0],
np.ones((1, n_ord)) * feature_range[1])
if d_type == 'abdm-mvdm':
# pairwise distances
d_abdm = abdm(X_bin, cat_vars_ord, cat_vars_bin)
d_mvdm = mvdm(X_ord, y, cat_vars_ord, alpha=1)
# multidim scaled distances
d_abs_abdm = multidim_scaling(d_abdm, n_components=2, use_metric=True,
feature_range=feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)[0]
d_abs_mvdm = multidim_scaling(d_mvdm, n_components=2, use_metric=True,
feature_range=feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)[0]
# combine abdm and mvdm
for k, v in d_abs_abdm.items():
self.d_abs[k] = v * w + d_abs_mvdm[k] * (1 - w)
if center: # center the numerical feature values
self.d_abs[k] -= .5 * (self.d_abs[k].max() + self.d_abs[k].min())
else:
self.d_abs = multidim_scaling(d_pair, n_components=2, use_metric=True,
feature_range=feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)[0]
def infer_threshold(self,
X: np.ndarray,
threshold_perc: float = 95.
) -> None:
"""
Update threshold by a value inferred from the percentage of instances considered to be
outliers in a sample of the dataset.
Parameters
----------
X
Batch of instances.
threshold_perc
Percentage of X considered to be normal based on the outlier score.
"""
# convert categorical variables to numerical values
X = self.cat2num(X)
# compute outlier scores
iscore = self.score(X)
# update threshold
self.threshold = np.percentile(iscore, threshold_perc)
def cat2num(self, X: np.ndarray) -> np.ndarray:
"""
Convert categorical variables to numerical values.
Parameters
----------
X
Batch of instances to analyze.
Returns
-------
Batch of instances where categorical variables are converted to numerical values.
"""
if self.cat_vars is not None: # convert categorical variables
if self.ohe:
X = ohe2ord(X, self.cat_vars)[0]
X = ord2num(X, self.d_abs)
return X
def score(self, X: np.ndarray) -> np.ndarray:
"""
Compute outlier scores.
Parameters
----------
X
Batch of instances to analyze.
Returns
-------
Array with outlier scores for each instance in the batch.
"""
n_batch, n_params = X.shape # batch size and number of features
n_components = min(self.n_components, n_params)
if self.max_n is not None:
n = min(self.n, self.max_n) # n can never be above max_n
else:
n = self.n
# clip X
if self.n > self.start_clip:
X_clip = np.clip(X, self.clip[0], self.clip[1])
else:
X_clip = X
# track mean and covariance matrix
roll_partial_means = X_clip.cumsum(axis=0) / (np.arange(n_batch) + 1).reshape((n_batch, 1))
coefs = (np.arange(n_batch) + 1.) / (np.arange(n_batch) + n + 1.)
new_means = self.mean + coefs.reshape((n_batch, 1)) * (roll_partial_means - self.mean)
new_means_offset = np.empty_like(new_means)
new_means_offset[0] = self.mean
new_means_offset[1:] = new_means[:-1]
coefs = ((n + np.arange(n_batch)) / (n + np.arange(n_batch) + 1.)).reshape((n_batch, 1, 1))
B = coefs * np.matmul((X_clip - new_means_offset)[:, :, None], (X_clip - new_means_offset)[:, None, :])
cov_batch = (n - 1.) / (n + max(1, n_batch - 1.)) * self.C + 1. / (n + max(1, n_batch - 1.)) * B.sum(axis=0)
# PCA
eigvals, eigvects = eigh(cov_batch, eigvals=(n_params - n_components, n_params - 1))
# projections
proj_x = np.matmul(X, eigvects)
proj_x_clip = np.matmul(X_clip, eigvects)
proj_means = np.matmul(new_means_offset, eigvects)
if type(self.C) == int and self.C == 0:
proj_cov = np.diag(np.zeros(n_components))
else:
proj_cov = np.matmul(eigvects.transpose(), np.matmul(self.C, eigvects))
# outlier scores are computed in the principal component space
coefs = (1. / (n + np.arange(n_batch) + 1.)).reshape((n_batch, 1, 1))
B = coefs * np.matmul((proj_x_clip - proj_means)[:, :, None], (proj_x_clip - proj_means)[:, None, :])
all_C_inv = np.zeros_like(B)
c_inv = None
for i, b in enumerate(B):
if c_inv is None:
if abs(np.linalg.det(proj_cov)) > EPSILON:
c_inv = np.linalg.inv(proj_cov)
all_C_inv[i] = c_inv
continue
else:
if n + i == 0:
continue
proj_cov = (n + i - 1.) / (n + i) * proj_cov + b
continue
else:
c_inv = (n + i - 1.) / float(n + i - 2.) * all_C_inv[i - 1]
BC1 = np.matmul(B[i - 1], c_inv)
all_C_inv[i] = c_inv - 1. / (1. + np.trace(BC1)) * np.matmul(c_inv, BC1)
# update parameters
self.mean = new_means[-1]
self.C = cov_batch
stdev = np.sqrt(np.diag(cov_batch))
self.n += n_batch
if self.n > self.start_clip:
self.clip = [self.mean - self.std_clip * stdev, self.mean + self.std_clip * stdev]
# compute outlier scores
x_diff = proj_x - proj_means
outlier_score = np.matmul(x_diff[:, None, :], np.matmul(all_C_inv, x_diff[:, :, None])).reshape(n_batch)
return outlier_score
def predict(self,
X: np.ndarray,
return_instance_score: bool = True) \
-> Dict[Dict[str, str], Dict[np.ndarray, np.ndarray]]:
"""
Compute outlier scores and transform into outlier predictions.
Parameters
----------
X
Batch of instances.
return_instance_score
Whether to return instance level outlier scores.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the outlier predictions and instance level outlier scores.
"""
# convert categorical variables to numerical values
X = self.cat2num(X)
# compute outlier scores
iscore = self.score(X)
# values above threshold are outliers
outlier_pred = (iscore > self.threshold).astype(int)
# populate output dict
od = outlier_prediction_dict()
od['meta'] = self.meta
od['data']['is_outlier'] = outlier_pred
if return_instance_score:
od['data']['instance_score'] = iscore
return od
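# Minimal usage sketch behind a __main__ guard; illustrative only. Purely numerical random data
# is assumed, so `fit` (which only maps categorical variables to numerical values) is not needed
# before inferring the threshold.
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.default_rng(0)
    x_ref = rng.standard_normal((500, 4))
    od = Mahalanobis(n_components=3)
    od.infer_threshold(x_ref, threshold_perc=95.)
    preds = od.predict(rng.standard_normal((10, 4)))
    print(preds['data']['is_outlier'])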
| 14,294 | 39.495751 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_mahalanobis.py
|
from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
from alibi_detect.od import Mahalanobis
from alibi_detect.version import __version__
threshold = [None, 5.]
n_components = [2, 3]
std_clip = [2, 3]
start_clip = [10, 1000]
max_n = [None, 50]
threshold_perc = [75., 95.]
return_instance_score = [True, False]
tests = list(product(threshold, n_components, std_clip, start_clip,
max_n, threshold_perc, return_instance_score))
n_tests = len(tests)
@pytest.fixture
def mahalanobis_params(request):
return tests[request.param]
@pytest.mark.parametrize('mahalanobis_params', list(range(n_tests)), indirect=True)
def test_mahalanobis(mahalanobis_params):
threshold, n_components, std_clip, start_clip, max_n, \
threshold_perc, return_instance_score = mahalanobis_params
X, y = load_iris(return_X_y=True)
mh = Mahalanobis(threshold, n_components=n_components, std_clip=std_clip,
start_clip=start_clip, max_n=max_n)
assert mh.threshold == threshold
assert mh.n == 0
assert mh.meta == {'name': 'Mahalanobis', 'detector_type': 'outlier', 'data_type': 'tabular',
'online': True, 'version': __version__}
mh.infer_threshold(X, threshold_perc=threshold_perc)
assert mh.n == X.shape[0]
iscore = mh.score(X) # noqa
assert mh.n == 2 * X.shape[0]
assert mh.mean.shape[0] == X.shape[1]
assert mh.C.shape == (X.shape[1], X.shape[1])
assert (np.diag(mh.C) >= 0).all()
od_preds = mh.predict(X, return_instance_score=return_instance_score)
assert mh.n == 3 * X.shape[0]
assert od_preds['meta'] == mh.meta
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> mh.threshold).astype(int).sum()
else:
assert od_preds['data']['instance_score'] is None
| 1,958 | 36.673077 | 97 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_aegmm.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAEGMM
from alibi_detect.version import __version__
threshold = [None, 5.]
n_gmm = [1, 2]
w_energy = [.1, .5]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, n_gmm, w_energy, threshold_perc, return_instance_score))
n_tests = len(tests)
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(X_train.shape[0], -1)[:1000] # only train on 1000 instances
X = X.astype(np.float32)
X /= 255
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def aegmm_params(request):
return tests[request.param]
@pytest.mark.parametrize('aegmm_params', list(range(n_tests)), indirect=True)
def test_aegmm(aegmm_params):
# OutlierAEGMM parameters
threshold, n_gmm, w_energy, threshold_perc, return_instance_score = aegmm_params
# define encoder, decoder and GMM density net
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
# init OutlierAEGMM
aegmm = OutlierAEGMM(
threshold=threshold,
encoder_net=encoder_net,
decoder_net=decoder_net,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm
)
assert aegmm.threshold == threshold
assert aegmm.meta == {'name': 'OutlierAEGMM', 'detector_type': 'outlier', 'data_type': None,
'online': False, 'version': __version__}
# fit OutlierAEGMM, infer threshold and compute scores
aegmm.fit(X, w_energy=w_energy, epochs=5, batch_size=1000, verbose=False)
aegmm.infer_threshold(X, threshold_perc=threshold_perc)
energy = aegmm.score(X)
perc_score = 100 * (energy < aegmm.threshold).astype(int).sum() / energy.shape[0]
assert threshold_perc + 5 > perc_score > threshold_perc - 5
# make and check predictions
od_preds = aegmm.predict(X, return_instance_score=return_instance_score)
assert od_preds['meta'] == aegmm.meta
assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> aegmm.threshold).astype(int).sum()
else:
assert od_preds['data']['instance_score'] is None
| 3,007 | 31.695652 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_llr.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, LSTM
from alibi_detect.od import LLR
from alibi_detect.version import __version__
input_dim = 5
hidden_dim = 20
shape = (1000, 6)
X_train = np.zeros(shape, dtype=np.int32)
X_train[:, ::2] = 1
X_test = np.zeros(shape, dtype=np.int32)
X_test[:, ::2] = 2
X_val = np.concatenate([X_train[:50], X_test[:50]])
def loss_fn(y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
y = tf.one_hot(tf.cast(y, tf.int32), input_dim)
return tf.nn.softmax_cross_entropy_with_logits(y, x, axis=-1)
def likelihood_fn(y: tf.Tensor, x: tf.Tensor) -> tf.Tensor:
return - loss_fn(y, x)
threshold = [None]
threshold_perc = [50.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, threshold_perc, return_instance_score,
return_feature_score, outlier_type))
n_tests = len(tests)
@pytest.fixture
def llr_params(request):
return tests[request.param]
@pytest.mark.parametrize('llr_params', list(range(n_tests)), indirect=True)
def test_llr(llr_params):
# LLR parameters
threshold, threshold_perc, return_instance_score, return_feature_score, outlier_type = llr_params
# define model and detector
inputs = Input(shape=(shape[-1] - 1,), dtype=tf.int32)
x = tf.one_hot(tf.cast(inputs, tf.int32), input_dim)
x = LSTM(hidden_dim, return_sequences=True)(x)
logits = Dense(input_dim, activation=None)(x)
model = tf.keras.Model(inputs=inputs, outputs=logits)
od = LLR(threshold=threshold, sequential=True, model=model, log_prob=likelihood_fn)
assert od.threshold == threshold
assert od.meta == {'name': 'LLR', 'detector_type': 'outlier', 'data_type': None,
'online': False, 'version': __version__}
od.fit(
X_train,
loss_fn=loss_fn,
mutate_fn_kwargs={'rate': .5, 'feature_range': (0, input_dim)},
epochs=1,
verbose=False
)
od.infer_threshold(X_val, threshold_perc=threshold_perc)
# iscore_test = od.score(X_test)[1]
# iscore_train = od.score(X_train)[1]
# assert (iscore_test > iscore_train).all()
od_preds = od.predict(X_test,
return_instance_score=return_instance_score,
return_feature_score=return_feature_score,
outlier_type=outlier_type)
assert od_preds['meta'] == od.meta
if outlier_type == 'instance':
assert od_preds['data']['is_outlier'].shape == (X_test.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> od.threshold).astype(int).sum()
elif outlier_type == 'feature':
assert od_preds['data']['is_outlier'].shape == (X_test.shape[0], X_test.shape[1] - 1)
if return_feature_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
> od.threshold).astype(int).sum()
if return_feature_score:
assert od_preds['data']['feature_score'].shape == (X_test.shape[0], X_test.shape[1] - 1)
else:
assert od_preds['data']['feature_score'] is None
if return_instance_score:
assert od_preds['data']['instance_score'].shape == (X_test.shape[0],)
else:
assert od_preds['data']['instance_score'] is None
| 3,587 | 34.524752 | 101 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_ae.py
|
from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierAE
from alibi_detect.version import __version__
threshold = [None, 5.]
threshold_perc = [90.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_perc = [50, 100]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, threshold_perc, return_instance_score,
return_feature_score, outlier_perc, outlier_type))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
input_dim = X.shape[1]
encoding_dim = 1
@pytest.fixture
def ae_params(request):
return tests[request.param]
@pytest.mark.parametrize('ae_params', list(range(n_tests)), indirect=True)
def test_ae(ae_params):
# OutlierAE parameters
threshold, threshold_perc, return_instance_score, return_feature_score, outlier_perc, outlier_type = ae_params
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(encoding_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(encoding_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
# init OutlierAE
ae = OutlierAE(
threshold=threshold,
encoder_net=encoder_net,
decoder_net=decoder_net
)
assert ae.threshold == threshold
assert ae.meta == {'name': 'OutlierAE', 'detector_type': 'outlier', 'data_type': None,
'online': False, 'version': __version__}
# fit OutlierAE, infer threshold and compute scores
ae.fit(X, epochs=5, verbose=False)
ae.infer_threshold(X, threshold_perc=threshold_perc)
fscore, iscore = ae.score(X)
perc_score = 100 * (iscore < ae.threshold).astype(int).sum() / iscore.shape[0]
assert threshold_perc + 5 > perc_score > threshold_perc - 5
# make and check predictions
od_preds = ae.predict(X,
return_instance_score=return_instance_score,
return_feature_score=return_feature_score,
outlier_type=outlier_type,
outlier_perc=outlier_perc
)
assert od_preds['meta'] == ae.meta
if outlier_type == 'instance':
assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> ae.threshold).astype(int).sum()
elif outlier_type == 'feature':
assert od_preds['data']['is_outlier'].shape == X.shape
if return_feature_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
> ae.threshold).astype(int).sum()
if return_feature_score:
assert od_preds['data']['feature_score'].shape == X.shape
else:
assert od_preds['data']['feature_score'] is None
if return_instance_score:
assert od_preds['data']['instance_score'].shape == (X.shape[0],)
else:
assert od_preds['data']['instance_score'] is None
| 3,537 | 33.349515 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_vae.py
|
from itertools import product
import numpy as np
import pytest
from sklearn.datasets import load_iris
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierVAE
from alibi_detect.models.tensorflow.losses import elbo
from alibi_detect.version import __version__
threshold = [None, 5.]
score_type = ['mse']
samples = [10]
loss_fn = [elbo, tf.keras.losses.mse]
threshold_perc = [90.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_perc = [50, 100]
outlier_type = ['instance', 'feature']
tests = list(product(threshold, score_type, samples, loss_fn, threshold_perc,
return_instance_score, return_feature_score, outlier_perc, outlier_type))
n_tests = len(tests)
# load iris data
X, y = load_iris(return_X_y=True)
X = X.astype(np.float32)
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def vae_params(request):
return tests[request.param]
@pytest.mark.parametrize('vae_params', list(range(n_tests)), indirect=True)
def test_vae(vae_params):
# OutlierVAE parameters
threshold, score_type, samples, loss_fn, threshold_perc, return_instance_score, \
return_feature_score, outlier_perc, outlier_type = vae_params
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
# init OutlierVAE
vae = OutlierVAE(
threshold=threshold,
score_type=score_type,
encoder_net=encoder_net,
decoder_net=decoder_net,
latent_dim=latent_dim,
samples=samples
)
assert vae.threshold == threshold
assert vae.meta == {'name': 'OutlierVAE', 'detector_type': 'outlier', 'data_type': None,
'online': False, 'version': __version__}
# fit OutlierVAE, infer threshold and compute scores
vae.fit(X, loss_fn=loss_fn, epochs=5, verbose=False)
vae.infer_threshold(X, threshold_perc=threshold_perc)
fscore, iscore = vae.score(X)
perc_score = 100 * (iscore < vae.threshold).astype(int).sum() / iscore.shape[0]
assert threshold_perc + 5 > perc_score > threshold_perc - 5
# make and check predictions
od_preds = vae.predict(X,
return_instance_score=return_instance_score,
return_feature_score=return_feature_score,
outlier_type=outlier_type,
outlier_perc=outlier_perc
)
assert od_preds['meta'] == vae.meta
if outlier_type == 'instance':
assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> vae.threshold).astype(int).sum()
elif outlier_type == 'feature':
assert od_preds['data']['is_outlier'].shape == X.shape
if return_feature_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score']
> vae.threshold).astype(int).sum()
if return_feature_score:
assert od_preds['data']['feature_score'].shape == X.shape
else:
assert od_preds['data']['feature_score'] is None
if return_instance_score:
assert od_preds['data']['instance_score'].shape == (X.shape[0],)
else:
assert od_preds['data']['instance_score'] is None
| 3,862 | 33.801802 | 94 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_iforest.py
|
from itertools import product
import pytest
from sklearn.datasets import load_iris
from alibi_detect.od import IForest
from alibi_detect.version import __version__
threshold = [None, 0.]
threshold_perc = [75., 95.]
return_instance_score = [True, False]
tests = list(product(threshold, threshold_perc, return_instance_score))
n_tests = len(tests)
@pytest.fixture
def iforest_params(request):
threshold, threshold_perc, return_instance_score = tests[request.param]
return threshold, threshold_perc, return_instance_score
@pytest.mark.parametrize('iforest_params', list(range(n_tests)), indirect=True)
def test_isolation_forest(iforest_params):
threshold, threshold_perc, return_instance_score = iforest_params
X, y = load_iris(return_X_y=True)
iforest = IForest(threshold)
assert iforest.threshold == threshold
assert iforest.meta == {'name': 'IForest', 'detector_type': 'outlier', 'data_type': 'tabular',
'online': False, 'version': __version__}
iforest.fit(X)
iforest.infer_threshold(X, threshold_perc=threshold_perc)
iscore = iforest.score(X)
perc_score = 100 * (iscore < iforest.threshold).astype(int).sum() / iscore.shape[0]
assert threshold_perc + 5 > perc_score > threshold_perc - 5
od_preds = iforest.predict(X, return_instance_score=return_instance_score)
assert od_preds['meta'] == iforest.meta
assert od_preds['data']['is_outlier'].sum() == (iscore > iforest.threshold).astype(int).sum()
if not return_instance_score:
assert od_preds['data']['instance_score'] is None
| 1,583 | 39.615385 | 98 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_seq2seq.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.od import OutlierSeq2Seq
from alibi_detect.utils.perturbation import inject_outlier_ts
from alibi_detect.version import __version__
n_features = [1, 2]
seq_len = [20, 50]
threshold = [None, 5.]
threshold_perc = [90.]
return_instance_score = [True, False]
return_feature_score = [True, False]
outlier_perc = [100]
outlier_type = ['instance', 'feature']
tests = list(product(n_features, seq_len, threshold, threshold_perc,
return_instance_score, return_feature_score, outlier_perc, outlier_type))
n_tests = len(tests)
latent_dim = 20
@pytest.fixture
def seq2seq_params(request):
return tests[request.param]
@pytest.mark.parametrize('seq2seq_params', list(range(n_tests)), indirect=True)
def test_seq2seq(seq2seq_params):
# OutlierSeq2Seq parameters
n_features, seq_len, threshold, threshold_perc, return_instance_score, \
return_feature_score, outlier_perc, outlier_type = seq2seq_params
# create artificial sine time series
X = np.sin(np.linspace(-50, 50, 10000)).astype(np.float32).reshape((-1, n_features))
# create outliers for threshold and detection
X_threshold = inject_outlier_ts(X, perc_outlier=100 - threshold_perc, perc_window=10, n_std=10., min_std=9.).data
X_outlier = inject_outlier_ts(X, perc_outlier=100 - threshold_perc, perc_window=10, n_std=10., min_std=9.).data
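    # both series have outliers injected into (100 - threshold_perc)% of their points;
    # one copy is used to infer the threshold, the other to check detection afterwards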
# define architecture
od = OutlierSeq2Seq(n_features, seq_len, threshold=threshold, latent_dim=latent_dim)
if threshold is None:
assert od.threshold == 0.
else:
assert od.threshold == threshold
assert od.meta == {'name': 'OutlierSeq2Seq', 'detector_type': 'outlier', 'data_type': 'time-series',
'online': False, 'version': __version__}
# fit OutlierSeq2Seq
od.fit(X, epochs=2, verbose=False)
# create some outliers and infer threshold
od.infer_threshold(X_threshold, threshold_perc=threshold_perc)
# compute scores and check ranges
fscore, iscore = od.score(X_threshold, outlier_perc=outlier_perc)
if isinstance(od.threshold, np.ndarray):
perc_score = 100 * (fscore < 0).astype(int).sum() / iscore.shape[0] / n_features
assert threshold_perc + 5 > perc_score > threshold_perc - 5
# create outliers and make predictions
od_preds = od.predict(X_outlier,
return_instance_score=return_instance_score,
return_feature_score=return_feature_score,
outlier_type=outlier_type,
outlier_perc=outlier_perc)
assert od_preds['meta'] == od.meta
if outlier_type == 'instance':
assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score'] > 0).astype(int).sum()
elif outlier_type == 'feature':
assert od_preds['data']['is_outlier'].shape == X.shape
if return_feature_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['feature_score'] > 0).astype(int).sum()
if return_feature_score:
assert od_preds['data']['feature_score'].shape == X.shape
else:
assert od_preds['data']['feature_score'] is None
if return_instance_score:
assert od_preds['data']['instance_score'].shape == (X.shape[0],)
else:
assert od_preds['data']['instance_score'] is None
| 3,525 | 38.617978 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_ensemble.py
|
import pytest
import torch
from alibi_detect.od.pytorch import ensemble
from alibi_detect.exceptions import NotFittedError
def test_pval_normalizer():
"""Test the PValNormalizer
- Test the PValNormalizer correctly normalizes data passed to it
- Test the PValNormalizer throws the correct errors if not fit
"""
normalizer = ensemble.PValNormalizer()
x = torch.randn(3, 10)
x_ref = torch.randn(64, 10)
# unfit normalizer raises exception
with pytest.raises(NotFittedError) as err:
normalizer(x)
assert err.value.args[0] == 'PValNormalizer has not been fit!'
normalizer.fit(x_ref)
x_norm = normalizer(x)
# compute the p-values explicitly and compare to the normalizer
# output.
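    # the loop below recomputes p_ij = (#{x_ref[:, j] > x[i, j]} + 1) / (N + 1) and checks
    # that the normalizer returned 1 - p_ij for every score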
assert torch.all(0 < x_norm)
assert torch.all(x_norm < 1)
for i in range(3):
for j in range(10):
comp_pval = ((x_ref[:, j] > x[i][j]).to(torch.float32)).sum() + 1
comp_pval /= (x_ref.shape[0] + 1)
normalizer_pval = x_norm[i][j].to(torch.float32)
assert torch.isclose(1 - comp_pval, normalizer_pval, atol=1e-4)
# Test the scriptability of the normalizer
normalizer = torch.jit.script(normalizer)
x_norm_2 = normalizer(x)
assert torch.all(x_norm_2 == x_norm)
def test_shift_and_scale_normalizer():
"""Test the ShiftAndScaleNormalizer
- Test the ShiftAndScaleNormalizer correctly normalizes data passed to it
- Test the ShiftAndScaleNormalizer throws the correct errors if not fit.
"""
normalizer = ensemble.ShiftAndScaleNormalizer()
x = torch.randn(3, 10) * 3 + 2
x_ref = torch.randn(5000, 10) * 3 + 2
# unfit normalizer raises exception
with pytest.raises(NotFittedError) as err:
normalizer(x)
assert err.value.args[0] == 'ShiftAndScaleNormalizer has not been fit!'
# test the normalizer correctly shifts and scales the data
normalizer.fit(x_ref)
x_norm = normalizer(x)
assert torch.isclose(x_norm.mean(), torch.tensor(0.), atol=0.1)
assert torch.isclose(x_norm.std(), torch.tensor(1.), atol=0.1)
# Test the scriptability of the normalizer
normalizer = torch.jit.script(normalizer)
x_norm_2 = normalizer(x)
assert torch.all(x_norm_2 == x_norm)
def test_average_aggregator():
"""Test the AverageAggregator
- Test the AverageAggregator correctly aggregates data passed to it.
- Test the AverageAggregator can be torch scripted
"""
aggregator = ensemble.AverageAggregator()
scores = torch.randn((3, 10))
# test the aggregator correctly averages the scores
aggregated_scores = aggregator(scores)
assert torch.all(torch.isclose(aggregated_scores, scores.mean(dim=1)))
assert aggregated_scores.shape == (3, )
# test the scriptability of the aggregator
aggregator = torch.jit.script(aggregator)
aggregated_scores_2 = aggregator(scores)
assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_weighted_average_aggregator():
"""Test the AverageAggregator
- Test the AverageAggregator correctly aggregates data passed to it
- Test the AverageAggregator throws an error if the weights are not valid
- Test the AverageAggregator can be torch scripted
"""
weights = abs(torch.randn((10)))
with pytest.raises(ValueError) as err:
aggregator = ensemble.AverageAggregator(weights=weights)
assert err.value.args[0] == 'Weights must sum to 1.'
# test the aggregator correctly weights the scores when computing the
# average
weights /= weights.sum()
aggregator = ensemble.AverageAggregator(weights=weights)
scores = torch.randn((3, 10))
aggregated_scores = aggregator(scores)
    assert torch.allclose(aggregated_scores, (weights @ scores.T))
assert aggregated_scores.shape == (3, )
# test the scriptability of the aggregator
aggregator = torch.jit.script(aggregator)
aggregated_scores_2 = aggregator(scores)
assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_topk_aggregator():
"""Test the TopKAggregator
- Test the TopKAggregator correctly aggregates data passed to it
- Test the TopKAggregator can be torch scripted
"""
aggregator = ensemble.TopKAggregator(k=4)
scores = torch.randn((3, 10))
# test the aggregator correctly computes the top k scores
aggregated_scores = aggregator(scores)
assert aggregated_scores.shape == (3, )
scores_sorted, _ = torch.sort(scores)
    assert torch.allclose(scores_sorted[:, -4:].mean(dim=1), aggregated_scores)
# test the scriptability of the aggregator
aggregator = torch.jit.script(aggregator)
aggregated_scores_2 = aggregator(scores)
assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_max_aggregator():
"""Test the MaxAggregator
- Test the MaxAggregator correctly aggregates data passed to it
- Test the MaxAggregator can be torch scripted
"""
aggregator = ensemble.MaxAggregator()
scores = torch.randn((3, 10))
# test the aggregator correctly computes the max scores
aggregated_scores = aggregator(scores)
assert aggregated_scores.shape == (3, )
max_vals, _ = scores.max(dim=1)
    assert torch.all(max_vals == aggregated_scores)
# test the scriptability of the aggregator
aggregator = torch.jit.script(aggregator)
aggregated_scores_2 = aggregator(scores)
assert torch.all(aggregated_scores_2 == aggregated_scores)
def test_min_aggregator():
"""Test the MinAggregator
- Test the MinAggregator correctly aggregates data passed to it
- Test the MinAggregator can be torch scripted
"""
aggregator = ensemble.MinAggregator()
scores = torch.randn((3, 10))
# test the aggregator correctly computes the min scores
aggregated_scores = aggregator(scores)
assert aggregated_scores.shape == (3, )
min_vals, _ = scores.min(dim=1)
    assert torch.all(min_vals == aggregated_scores)
# test the scriptability of the aggregator
aggregator = torch.jit.script(aggregator)
aggregated_scores_2 = aggregator(scores)
assert torch.all(aggregated_scores_2 == aggregated_scores)
@pytest.mark.parametrize('aggregator', ['AverageAggregator', 'MaxAggregator', 'MinAggregator', 'TopKAggregator'])
@pytest.mark.parametrize('normalizer', ['PValNormalizer', 'ShiftAndScaleNormalizer'])
def test_ensembler(aggregator, normalizer):
"""Test the Ensembler for each combination of aggregator and normalizer
- Test the ensembler correctly aggregates and normalizes the scores
- Test the ensembler can be torch scripted
"""
aggregator = getattr(ensemble, aggregator)()
normalizer = getattr(ensemble, normalizer)()
ensembler = ensemble.Ensembler(aggregator=aggregator, normalizer=normalizer)
x = torch.randn(3, 10)
x_ref = torch.randn(64, 10)
# test the ensembler correctly aggregates and normalizes the scores
ensembler.fit(x_ref)
x_norm = ensembler(x)
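    # the ensembler first normalizes the per-detector scores (using statistics fit on x_ref)
    # and then aggregates them across detectors into a single score per instance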
# test the scriptability of the ensembler
ensembler = torch.jit.script(ensembler)
x_norm_2 = ensembler(x)
assert torch.all(x_norm_2 == x_norm)
| 7,122 | 34.08867 | 113 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_prophet.py
|
import datetime
from itertools import product
import numpy as np
import pandas as pd
import pytest
from alibi_detect.od import OutlierProphet
from alibi_detect.version import __version__
growth = ['linear', 'logistic']
return_instance_score = [True, False]
return_forecast = [True, False]
d_fit = {
'ds': pd.date_range(datetime.date.today(), periods=100),
'y': np.random.randn(100)
}
df_fit = pd.DataFrame(data=d_fit)
d_test = {
'ds': pd.date_range(d_fit['ds'][-1] + datetime.timedelta(1), periods=100),
'y': np.random.randn(100)
}
df_test = pd.DataFrame(data=d_test)
tests = list(product(growth, return_instance_score, return_forecast))
n_tests = len(tests)
@pytest.fixture
def prophet_params(request):
return tests[request.param]
@pytest.mark.parametrize('prophet_params', list(range(n_tests)), indirect=True)
def test_prophet(prophet_params):
prophet = pytest.importorskip('prophet', reason="Prophet tests skipped as Prophet not installed")
growth, return_instance_score, return_forecast = prophet_params
od = OutlierProphet(growth=growth)
assert isinstance(od.model, prophet.forecaster.Prophet)
assert od.meta == {'name': 'OutlierProphet', 'detector_type': 'outlier', 'data_type': 'time-series',
'online': False, 'version': __version__}
if growth == 'logistic':
df_fit['cap'] = 10.
df_test['cap'] = 10.
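        # Prophet's logistic growth requires a carrying capacity column 'cap' in both
        # the fit and test dataframes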
od.fit(df_fit)
forecast = od.score(df_test)
fcst_cols = list(forecast.columns)
check_cols = ['ds', 'yhat', 'yhat_lower', 'yhat_upper', 'score', 'y']
assert all(check_col in fcst_cols for check_col in check_cols)
od_preds = od.predict(df_test,
return_instance_score=return_instance_score,
return_forecast=return_forecast)
assert od_preds['meta'] == od.meta
assert (od_preds['data']['is_outlier']['ds'] == df_test['ds']).all()
assert od_preds['data']['is_outlier']['is_outlier'].shape[0] == df_test['ds'].shape[0]
if not return_instance_score:
assert od_preds['data']['instance_score'] is None
if not return_forecast:
with pytest.raises(KeyError):
od_preds['data']['forecast']
| 2,197 | 35.032787 | 104 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_sr.py
|
import pytest
import numpy as np
from alibi_detect.od import SpectralResidual
from alibi_detect.version import __version__
@pytest.fixture(scope='module')
def signal():
np.random.seed(0)
# create normal time series and one with perturbations
t = np.linspace(0, 0.5, 1000)
X = np.sin(40 * 2 * np.pi * t) + 0.5 * np.sin(90 * 2 * np.pi * t)
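    # X is a superposition of a 40 Hz and a half-amplitude 90 Hz sine sampled over 0.5 s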
idx_pert = np.random.choice(np.arange(1000), size=10, replace=False) # ensure we perturb exactly 10 points
X_pert = X.copy()
X_pert[idx_pert] = 10
return {"t": t, "X": X, "X_pert": X_pert}
@pytest.mark.parametrize('window_amp', [10, 20])
@pytest.mark.parametrize('window_local', [20, 30])
@pytest.mark.parametrize('n_est_points', [10, 20])
@pytest.mark.parametrize('return_instance_score', [True, False])
def test_detector(signal, window_amp, window_local, n_est_points, return_instance_score):
t, X, X_pert = signal["t"], signal['X'], signal['X_pert']
threshold = 6
od = SpectralResidual(threshold=threshold, window_amp=window_amp,
window_local=window_local, n_est_points=n_est_points)
assert od.threshold == threshold
assert od.meta == {'name': 'SpectralResidual',
'detector_type': 'outlier',
'data_type': 'time-series',
'online': True,
'version': __version__}
preds_in = od.predict(X, t, return_instance_score=return_instance_score)
assert preds_in['data']['is_outlier'].sum() <= 2.
if return_instance_score:
assert preds_in['data']['is_outlier'].sum() == (preds_in['data']['instance_score']
> od.threshold).astype(int).sum()
else:
assert preds_in['data']['instance_score'] is None
preds_out = od.predict(X_pert, t, return_instance_score=return_instance_score)
assert preds_out['data']['is_outlier'].sum() >= 10 # check if we detect at least the number of perturbed points
if return_instance_score:
assert preds_out['data']['is_outlier'].sum() == (preds_out['data']['instance_score']
> od.threshold).astype(int).sum()
else:
assert preds_out['data']['instance_score'] is None
assert preds_out['meta'] == od.meta
@pytest.mark.parametrize('method', ['constant', 'replicate', 'reflect'])
@pytest.mark.parametrize('side', ['left', 'right', 'bilateral'])
def test_padding(method, side):
np.random.seed(0)
for _ in range(100):
X_size = np.random.randint(low=10, high=1000)
W_size = np.random.randint(low=2, high=X_size - 1)
X = np.random.randint(low=0, high=10, size=X_size)
W = np.random.randint(low=0, high=10, size=W_size)
X_pad = SpectralResidual.pad_same(X=X, W=W, method=method, side=side)
X_conv = np.convolve(X_pad, W, 'valid')
assert X_conv.shape[0] == X_size
# length of the padding for laterals
pad_right = (W_size - 1) // 2
pad_left = W_size - 1 - pad_right
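        # 'same' padding adds W_size - 1 elements in total so that a 'valid' convolution
        # with W returns exactly X_size outputs; for 'bilateral' the padding is split
        # between the left and right ends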
if method == 'constant':
if side == 'left':
assert np.all(X_pad[:W_size - 1] == 0)
elif side == 'right':
assert np.all(X_pad[-W_size + 1:] == 0)
else:
if pad_left > 0:
assert np.all(X_pad[:pad_left] == 0)
if pad_right > 0:
assert np.all(X_pad[-pad_right:] == 0)
elif method == 'replicate':
if side == 'left':
assert np.all(X_pad[:W_size - 1] == X[0])
elif side == 'right':
assert np.all(X_pad[-W_size + 1:] == X[-1])
else:
if pad_left > 0:
assert np.all(X_pad[:pad_left] == X[0])
if pad_right > 0:
assert np.all(X_pad[-pad_right:] == X[-1])
else:
if side == 'left':
assert np.all(X_pad[:W_size - 1] == X[1:W_size][::-1])
elif side == 'right':
assert np.all(X_pad[-W_size + 1:] == X[-2:-W_size - 1:-1])
else:
if pad_left > 0:
assert np.all(X_pad[:pad_left] == X[1:pad_left + 1][::-1])
if pad_right > 0:
assert np.all(X_pad[-pad_right:] == X[-pad_right - 1:-1][::-1])
| 4,371 | 40.245283 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test_vaegmm.py
|
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.od import OutlierVAEGMM
from alibi_detect.version import __version__
threshold = [None, 5.]
n_gmm = [1, 2]
w_energy = [.1, .5]
w_recon = [0., 1e-7]
samples = [1, 10]
threshold_perc = [90.]
return_instance_score = [True, False]
tests = list(product(threshold, n_gmm, w_energy, w_recon, samples, threshold_perc, return_instance_score))
n_tests = len(tests)
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(X_train.shape[0], -1)[:1000] # only train on 1000 instances
X = X.astype(np.float32)
X /= 255
input_dim = X.shape[1]
latent_dim = 2
@pytest.fixture
def vaegmm_params(request):
return tests[request.param]
@pytest.mark.parametrize('vaegmm_params', list(range(n_tests)), indirect=True)
def test_vaegmm(vaegmm_params):
# OutlierVAEGMM parameters
threshold, n_gmm, w_energy, w_recon, samples, threshold_perc, return_instance_score = vaegmm_params
# define encoder, decoder and GMM density net
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
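    # NOTE (assumption based on the DAGMM-style architecture): the density net input is
    # latent_dim + 2 because the latent encoding is concatenated with two
    # reconstruction-error features before being fed to the GMM density net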
    # init OutlierVAEGMM
vaegmm = OutlierVAEGMM(
threshold=threshold,
encoder_net=encoder_net,
decoder_net=decoder_net,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
latent_dim=latent_dim,
samples=samples
)
assert vaegmm.threshold == threshold
assert vaegmm.meta == {'name': 'OutlierVAEGMM', 'detector_type': 'outlier', 'data_type': None,
'online': False, 'version': __version__}
    # fit OutlierVAEGMM, infer threshold and compute scores
vaegmm.fit(X, w_recon=w_recon, w_energy=w_energy, epochs=5, batch_size=1000, verbose=False)
vaegmm.infer_threshold(X, threshold_perc=threshold_perc)
energy = vaegmm.score(X)
perc_score = 100 * (energy < vaegmm.threshold).astype(int).sum() / energy.shape[0]
assert threshold_perc + 5 > perc_score > threshold_perc - 5
# make and check predictions
od_preds = vaegmm.predict(X, return_instance_score=return_instance_score)
assert od_preds['meta'] == vaegmm.meta
assert od_preds['data']['is_outlier'].shape == (X.shape[0],)
if return_instance_score:
assert od_preds['data']['is_outlier'].sum() == (od_preds['data']['instance_score']
> vaegmm.threshold).astype(int).sum()
else:
assert od_preds['data']['instance_score'] is None
| 3,175 | 32.083333 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__gmm/test__gmm_sklearn_backend.py
|
import pytest
import numpy as np
from alibi_detect.od.sklearn.gmm import GMMSklearn
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
def test_gmm_sklearn_scoring():
"""Test GMM detector sklearn scoring method.
Tests the scoring method of the GMM sklearn backend detector.
"""
gmm_sklearn = GMMSklearn(n_components=2)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = np.random.multivariate_normal(mean, cov, 1000)
gmm_sklearn.fit(x_ref)
x_1 = np.array([[8., 8.]])
scores_1 = gmm_sklearn.score(x_1)
x_2 = np.random.multivariate_normal(mean, cov, 1)
scores_2 = gmm_sklearn.score(x_2)
x_3 = np.array([[-10., 10.]])
scores_3 = gmm_sklearn.score(x_3)
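    # x_1 sits at the mode of the reference distribution, x_2 is a random in-distribution
    # draw and x_3 lies far outside it, so the scores should increase in that order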
# test correct ordering of scores given outlyingness of data
assert scores_1 < scores_2 < scores_3
# test that detector correctly detects true outlier
gmm_sklearn.infer_threshold(x_ref, 0.01)
x = np.concatenate((x_1, x_2, x_3))
outputs = gmm_sklearn.predict(x)
assert np.all(outputs.is_outlier == np.array([False, False, True]))
assert np.all(gmm_sklearn(x) == np.array([False, False, True]))
# test that 0.01 of the in distribution data is flagged as outliers
x = np.random.multivariate_normal(mean, cov, 1000)
outputs = gmm_sklearn.predict(x)
assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.01
def test_gmm_sklearn_backend_fit_errors():
"""Test gmm detector sklearn backend fit errors.
Tests the correct errors are raised when using the GMMSklearn backend detector.
"""
gmm_sklearn = GMMSklearn(n_components=2)
assert not gmm_sklearn.fitted
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = np.random.randn(1, 10)
with pytest.raises(NotFittedError) as err:
gmm_sklearn(x)
assert str(err.value) == 'GMMSklearn has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
gmm_sklearn.predict(x)
assert str(err.value) == 'GMMSklearn has not been fit!'
# Test the backend updates _fitted flag on fit.
x_ref = np.random.randn(1024, 10)
gmm_sklearn.fit(x_ref)
assert gmm_sklearn.fitted
    # Test that the backend raises an error if the forward method is called without the
# threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
gmm_sklearn(x)
assert str(err.value) == 'GMMSklearn has no threshold set, call `infer_threshold` to fit one!'
# Test that the backend can call predict without the threshold being inferred.
assert gmm_sklearn.predict(x)
def test_gmm_sklearn_fit():
"""Test GMM detector sklearn backend fit method.
    Tests the fit method of the GMMSklearn backend detector.
"""
gmm_sklearn = GMMSklearn(n_components=1)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = np.random.multivariate_normal(mean, cov, 1000)
fit_results = gmm_sklearn.fit(x_ref, tol=0.01)
assert fit_results['converged']
assert fit_results['n_iter'] < 10
assert fit_results['lower_bound'] < 1
| 3,185 | 33.258065 | 98 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__gmm/test__gmm_pytorch_backend.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od.pytorch.gmm import GMMTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
def test_gmm_pytorch_scoring():
"""Test GMM detector pytorch scoring method.
Tests the scoring method of the GMMTorch pytorch backend detector.
"""
gmm_torch = GMMTorch(n_components=1)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
gmm_torch.fit(x_ref)
x_1 = torch.tensor(np.array([[8., 8.]]))
scores_1 = gmm_torch.score(x_1)
x_2 = torch.tensor(np.random.multivariate_normal(mean, cov, 1))
scores_2 = gmm_torch.score(x_2)
x_3 = torch.tensor(np.array([[-10., 10.]]))
scores_3 = gmm_torch.score(x_3)
# test correct ordering of scores given outlyingness of data
assert scores_1 < scores_2 < scores_3
# test that detector correctly detects true outlier
gmm_torch.infer_threshold(x_ref, 0.01)
x = torch.cat((x_1, x_2, x_3))
outputs = gmm_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(gmm_torch(x) == torch.tensor([False, False, True]))
# test that 0.01 of the in distribution data is flagged as outliers
x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
outputs = gmm_torch.predict(x)
assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.01
def test_gmm_torch_backend_ts(tmp_path):
"""Test GMM detector backend is torch-scriptable and savable."""
gmm_torch = GMMTorch(n_components=2)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
gmm_torch.fit(x_ref)
gmm_torch.infer_threshold(x_ref, 0.1)
pred_1 = gmm_torch(x)
gmm_torch = torch.jit.script(gmm_torch)
pred_2 = gmm_torch(x)
assert torch.all(pred_1 == pred_2)
gmm_torch.save(tmp_path / 'gmm_torch.pt')
gmm_torch = torch.load(tmp_path / 'gmm_torch.pt')
pred_2 = gmm_torch(x)
assert torch.all(pred_1 == pred_2)
def test_gmm_pytorch_backend_fit_errors():
"""Test gmm detector pytorch backend fit errors.
Tests the correct errors are raised when using the GMMTorch pytorch backend detector.
"""
gmm_torch = GMMTorch(n_components=2)
assert not gmm_torch.fitted
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.tensor(np.random.randn(1, 10))
with pytest.raises(NotFittedError) as err:
gmm_torch(x)
assert str(err.value) == 'GMMTorch has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
gmm_torch.predict(x)
assert str(err.value) == 'GMMTorch has not been fit!'
# Test the backend updates _fitted flag on fit.
x_ref = torch.tensor(np.random.randn(1024, 10))
gmm_torch.fit(x_ref)
assert gmm_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
# threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
gmm_torch(x)
assert str(err.value) == 'GMMTorch has no threshold set, call `infer_threshold` to fit one!'
# Test that the backend can call predict without the threshold being inferred.
assert gmm_torch.predict(x)
def test_gmm_pytorch_fit():
"""Test GMM detector pytorch fit method.
    Tests that the pytorch detector checks for convergence and stops fitting early once it converges.
"""
gmm_torch = GMMTorch(n_components=1)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
fit_results = gmm_torch.fit(x_ref, tol=0.01)
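    # with tol=0.01 the fit is expected to converge and stop early (n_epochs < 10 is
    # asserted below) once the improvement in the lower bound falls below the tolerance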
assert fit_results['converged']
assert fit_results['n_epochs'] < 10
assert fit_results['lower_bound'] < 1
| 3,906 | 33.575221 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__gmm/test__gmm.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od._gmm import GMM
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_unfitted_gmm_score(backend):
"""Test GMM detector raises exceptions when not fitted."""
gmm_detector = GMM(n_components=2, backend=backend)
x = np.array([[0, 10], [0.1, 0]])
x_ref = np.random.randn(100, 2)
with pytest.raises(NotFittedError) as err:
gmm_detector.infer_threshold(x_ref, 0.1)
assert str(err.value) == 'GMM has not been fit!'
with pytest.raises(NotFittedError) as err:
gmm_detector.score(x)
assert str(err.value) == 'GMM has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
gmm_detector.predict(x)
assert str(err.value) == 'GMM has not been fit!'
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_fitted_gmm_score(backend):
"""Test GMM detector score method.
Test GMM detector that has been fitted on reference data but has not had a threshold
inferred can still score data using the predict method. Test that it does not raise an error
but does not return `threshold`, `p_value` and `is_outlier` values.
"""
gmm_detector = GMM(n_components=1, backend=backend)
x_ref = np.random.randn(100, 2)
gmm_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
scores = gmm_detector.score(x)
y = gmm_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 2
assert all(y['instance_score'] == scores)
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_fitted_gmm_predict(backend):
"""Test GMM detector predict method.
Test GMM detector that has been fitted on reference data and has had a threshold
inferred can score data using the predict method as well as predict outliers. Test that it
returns `threshold`, `p_value` and `is_outlier` values.
"""
gmm_detector = GMM(n_components=1, backend=backend)
x_ref = np.random.randn(100, 2)
gmm_detector.fit(x_ref)
gmm_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10], [0, 0.1]])
y = gmm_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 2
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_gmm_integration(backend):
"""Test GMM detector on moons dataset.
Test GMM detector on a more complex 2d example. Test that the detector can be fitted
on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
"""
gmm_detector = GMM(n_components=8, backend=backend)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
gmm_detector.fit(X_ref)
gmm_detector.infer_threshold(X_ref, 0.1)
result = gmm_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = gmm_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
def test_gmm_torchscript(tmp_path):
"""Tests user can torch-script gmm detector."""
gmm_detector = GMM(n_components=8, backend='pytorch')
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
gmm_detector.fit(X_ref)
gmm_detector.infer_threshold(X_ref, 0.1)
x_outlier = np.array([[-1, 1.5]])
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
ts_gmm = torch.jit.script(gmm_detector.backend)
y = ts_gmm(x)
assert torch.all(y == torch.tensor([False, True]))
ts_gmm.save(tmp_path / 'gmm.pt')
ts_gmm = torch.load(tmp_path / 'gmm.pt')
y = ts_gmm(x)
assert torch.all(y == torch.tensor([False, True]))
@pytest.mark.parametrize('backend', ['pytorch', 'sklearn'])
def test_gmm_fit(backend):
"""Test GMM detector fit method.
    Tests that the detector checks for convergence and stops fitting early once it converges.
"""
gmm = GMM(n_components=1, backend=backend)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
fit_results = gmm.fit(x_ref, tol=0.01, batch_size=32)
assert isinstance(fit_results['lower_bound'], float)
assert fit_results['converged']
assert fit_results['lower_bound'] < 1
| 4,890 | 34.963235 | 102 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__lof/test__lof_backend.py
|
import pytest
import torch
from alibi_detect.od.pytorch.lof import LOFTorch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.ensemble import Ensembler, PValNormalizer, AverageAggregator
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.fixture(scope='function')
def ensembler(request):
return Ensembler(
normalizer=PValNormalizer(),
aggregator=AverageAggregator()
)
def test_lof_torch_backend():
"""
Test the lof torch backend can be correctly initialized, fit and used to
predict outliers.
"""
lof_torch = LOFTorch(k=5)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
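    # the third test instance is scaled by 100 so it lies far outside the reference
    # distribution and should be the only point flagged as an outlier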
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
outputs = lof_torch.predict(x)
assert outputs.instance_score.shape == (3, )
assert outputs.is_outlier is None
assert outputs.p_value is None
scores = lof_torch.score(x)
assert torch.all(scores == outputs.instance_score)
lof_torch.infer_threshold(x_ref, 0.1)
outputs = lof_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(lof_torch(x) == torch.tensor([False, False, True]))
def test_lof_torch_backend_ensemble(ensembler):
"""
Test the lof torch backend can be correctly initialized as an ensemble, fit
on data and used to predict outliers.
"""
lof_torch = LOFTorch(k=[4, 5], ensembler=ensembler)
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
lof_torch.infer_threshold(x_ref, 0.1)
outputs = lof_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(lof_torch(x) == torch.tensor([False, False, True]))
def test_lof_torch_backend_ensemble_ts(tmp_path, ensembler):
"""
Test the lof torch backend can be initialized as an ensemble and
torch scripted, as well as saved and loaded to and from disk.
"""
lof_torch = LOFTorch(k=[4, 5], ensembler=ensembler)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
lof_torch.infer_threshold(x_ref, 0.1)
pred_1 = lof_torch(x)
lof_torch = torch.jit.script(lof_torch)
pred_2 = lof_torch(x)
assert torch.all(pred_1 == pred_2)
lof_torch.save(tmp_path / 'lof_torch.pt')
lof_torch = torch.load(tmp_path / 'lof_torch.pt')
pred_2 = lof_torch(x)
assert torch.all(pred_1 == pred_2)
def test_lof_torch_backend_ts(tmp_path):
"""
Test the lof torch backend can be initialized and torch scripted, as well as
saved and loaded to and from disk.
"""
lof_torch = LOFTorch(k=7)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
lof_torch.infer_threshold(x_ref, 0.1)
pred_1 = lof_torch(x)
lof_torch = torch.jit.script(lof_torch)
pred_2 = lof_torch(x)
assert torch.all(pred_1 == pred_2)
lof_torch.save(tmp_path / 'lof_torch.pt')
lof_torch = torch.load(tmp_path / 'lof_torch.pt')
pred_2 = lof_torch(x)
assert torch.all(pred_1 == pred_2)
def test_lof_kernel(ensembler):
"""
Test the lof torch backend can be correctly initialized with a kernel, fit
on data and used to predict outliers.
"""
kernel = GaussianRBF(sigma=torch.tensor((1)))
lof_torch = LOFTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
lof_torch.infer_threshold(x_ref, 0.1)
outputs = lof_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([0, 0, 1]))
assert torch.all(lof_torch(x) == torch.tensor([0, 0, 1]))
@pytest.mark.skip(reason="Can't convert GaussianRBF to torch script due to torch script type constraints")
def test_lof_kernel_ts(ensembler):
"""
Test the lof torch backend can be correctly initialized with a kernel,
and torch scripted, as well as saved and loaded to and from disk.
"""
kernel = GaussianRBF(sigma=torch.tensor((0.25)))
lof_torch = LOFTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
lof_torch.infer_threshold(x_ref, 0.1)
pred_1 = lof_torch(x)
lof_torch = torch.jit.script(lof_torch)
pred_2 = lof_torch(x)
assert torch.all(pred_1 == pred_2)
def test_lof_torch_backend_ensemble_fit_errors(ensembler):
"""Tests the correct errors are raised when using the LOFTorch backend as an ensemble."""
lof_torch = LOFTorch(k=[4, 5], ensembler=ensembler)
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
lof_torch(x)
assert str(err.value) == 'LOFTorch has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
lof_torch.predict(x)
assert str(err.value) == 'LOFTorch has not been fit!'
# Test the backend updates fitted flag on fit.
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
assert lof_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
# threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
lof_torch(x)
assert str(err.value) == 'LOFTorch has no threshold set, call `infer_threshold` to fit one!'
    # Test that the ensemble backend also raises an error when predict is called before the threshold is inferred.
with pytest.raises(ThresholdNotInferredError) as err:
lof_torch.predict(x)
assert str(err.value) == 'LOFTorch has no threshold set, call `infer_threshold` to fit one!'
def test_lof_torch_backend_fit_errors():
"""Tests the correct errors are raised when using the LOFTorch backend as a single detector."""
lof_torch = LOFTorch(k=4)
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
lof_torch(x)
assert str(err.value) == 'LOFTorch has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
lof_torch.predict(x)
assert str(err.value) == 'LOFTorch has not been fit!'
# Test the backend updates fitted flag on fit.
x_ref = torch.randn((1024, 10))
lof_torch.fit(x_ref)
assert lof_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
# threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
lof_torch(x)
assert str(err.value) == 'LOFTorch has no threshold set, call `infer_threshold` to fit one!'
# Test that the backend can call predict without the threshold being inferred.
lof_torch.predict(x)
def test_lof_infer_threshold_value_errors():
"""Tests the correct errors are raised when using incorrect choice of fpr for the LOFTorch backend detector."""
lof_torch = LOFTorch(k=4)
x = torch.randn((1024, 10))
lof_torch.fit(x)
# fpr must be greater than 1/len(x) otherwise it excludes all points in the reference dataset
with pytest.raises(ValueError) as err:
lof_torch.infer_threshold(x, 1/1025)
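    # 1/1025 is just below 1/1024 = 0.0009765625, the smallest valid fpr for the
    # 1024 reference points used above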
assert str(err.value) == '`fpr` must be greater than `1/len(x)=0.0009765625`.'
# fpr must be between 0 and 1
with pytest.raises(ValueError) as err:
lof_torch.infer_threshold(x, 1.1)
assert str(err.value) == '`fpr` must be in `(0, 1)`.'
lof_torch.infer_threshold(x, 0.99)
lof_torch.infer_threshold(x, 1/1023)
| 7,975 | 35.090498 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__lof/test__lof.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od._lof import LOF
from alibi_detect.od.pytorch.ensemble import AverageAggregator, TopKAggregator, MaxAggregator, \
MinAggregator, ShiftAndScaleNormalizer, PValNormalizer
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
from sklearn.datasets import make_moons
def make_lof_detector(k=5, aggregator=None, normalizer=None):
lof_detector = LOF(
k=k, aggregator=aggregator,
normalizer=normalizer
)
x_ref = np.random.randn(100, 2)
lof_detector.fit(x_ref)
lof_detector.infer_threshold(x_ref, 0.1)
return lof_detector
def test_unfitted_lof_single_score():
lof_detector = LOF(k=10)
x = np.array([[0, 10], [0.1, 0]])
x_ref = np.random.randn(100, 2)
# test infer_threshold raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = lof_detector.infer_threshold(x_ref, 0.1)
assert str(err.value) == 'LOF has not been fit!'
# test score raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = lof_detector.score(x)
assert str(err.value) == 'LOF has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = lof_detector.predict(x)
assert str(err.value) == 'LOF has not been fit!'
def test_fitted_lof_score():
"""
Test fitted but not threshold inferred non-ensemble detectors can still score data using the predict method.
Unlike the ensemble detectors, the non-ensemble detectors do not require the ensembler to be fit in the
infer_threshold method. See the test_fitted_lof_ensemble_score test for the ensemble case.
"""
lof_detector = LOF(k=10)
x_ref = np.random.randn(100, 2)
lof_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
y = lof_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > y['instance_score'][1]
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
def test_fitted_lof_ensemble_score():
"""
Test fitted but not threshold inferred ensemble detectors correctly raise an error when calling
the predict method. This is because the ensembler is fit in the infer_threshold method.
"""
lof_detector = LOF(k=[10, 14, 18])
x_ref = np.random.randn(100, 2)
lof_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
with pytest.raises(ThresholdNotInferredError):
lof_detector.predict(x)
with pytest.raises(ThresholdNotInferredError):
lof_detector.score(x)
def test_incorrect_lof_ensemble_init():
# test lof ensemble with aggregator passed as None raises exception
with pytest.raises(ValueError) as err:
LOF(k=[8, 9, 10], aggregator=None)
assert str(err.value) == ('If `k` is a `np.ndarray`, `list` or `tuple`, '
'the `aggregator` argument cannot be ``None``.')
def test_fitted_lof_predict():
"""
Test that a detector fitted on data and with threshold inferred correctly, will score
    and label outliers, as well as return the p-values using the predict method. Also check
that the score method gives the same results.
"""
lof_detector = make_lof_detector(k=10)
x_ref = np.random.randn(100, 2)
lof_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10], [0, 0.1]])
y = lof_detector.predict(x)
y = y['data']
scores = lof_detector.score(x)
assert np.all(y['instance_score'] == scores)
assert y['instance_score'][0] > y['instance_score'][1]
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_unfitted_lof_ensemble(aggregator, normalizer):
lof_detector = LOF(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0.1, 0]])
# Test unfit lof ensemble raises exception when calling predict method.
with pytest.raises(NotFittedError) as err:
_ = lof_detector.predict(x)
assert str(err.value) == 'LOF has not been fit!'
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_lof_ensemble(aggregator, normalizer):
lof_detector = LOF(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x_ref = np.random.randn(100, 2)
lof_detector.fit(x_ref)
x = np.array([[0, 10], [0, 0.1]])
    # test the ensemble raises ThresholdNotInferredError if it has only been fit, the threshold
    # has not been inferred and the normalizer is not None.
if normalizer() is not None:
with pytest.raises(ThresholdNotInferredError):
lof_detector.predict(x)
else:
lof_detector.predict(x)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_lof_ensemble_predict(aggregator, normalizer):
lof_detector = make_lof_detector(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0, 0.1]])
# test fitted detectors with inferred thresholds can score data using the predict method.
y = lof_detector.predict(x)
y = y['data']
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
# test fitted detectors with inferred thresholds can score data using the score method.
scores = lof_detector.score(x)
assert np.all(y['instance_score'] == scores)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_lof_ensemble_torch_script(aggregator, normalizer):
lof_detector = make_lof_detector(k=[5, 6, 7], aggregator=aggregator(), normalizer=normalizer())
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([[0, 10], [0, 0.1]])
    # test the torchscripted ensemble lof detector correctly flags the outlier.
y = ts_lof(x)
assert torch.all(y == torch.tensor([True, False]))
def test_lof_single_torchscript():
lof_detector = make_lof_detector(k=5)
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([[0, 10], [0, 0.1]])
    # test the torchscripted single lof detector correctly flags the outlier.
y = ts_lof(x)
assert torch.all(y == torch.tensor([True, False]))
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator, lambda: 'AverageAggregator',
lambda: 'TopKAggregator', lambda: 'MaxAggregator',
lambda: 'MinAggregator'])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None,
lambda: 'ShiftAndScaleNormalizer', lambda: 'PValNormalizer'])
def test_lof_ensemble_integration(tmp_path, aggregator, normalizer):
"""Test lof ensemble detector on moons dataset.
Tests ensemble lof detector with every combination of aggregator and normalizer on the moons dataset.
Fits and infers thresholds in each case. Verifies that the detector can correctly detect inliers
and outliers and that it can be serialized using the torchscript.
"""
lof_detector = LOF(
k=[10, 14, 18],
aggregator=aggregator(),
normalizer=normalizer()
)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
lof_detector.fit(X_ref)
lof_detector.infer_threshold(X_ref, 0.1)
result = lof_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = lof_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_lof(x)
assert torch.all(y == torch.tensor([False, True]))
ts_lof.save(tmp_path / 'lof.pt')
lof_detector = torch.load(tmp_path / 'lof.pt')
y = lof_detector(x)
assert torch.all(y == torch.tensor([False, True]))
def test_lof_integration(tmp_path):
"""Test lof detector on moons dataset.
Tests lof detector on the moons dataset. Fits and infers thresholds and verifies that the detector can
correctly detect inliers and outliers. Checks that it can be serialized using the torchscript.
"""
lof_detector = LOF(k=18)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
lof_detector.fit(X_ref)
lof_detector.infer_threshold(X_ref, 0.1)
result = lof_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = lof_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_lof = torch.jit.script(lof_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_lof(x)
assert torch.all(y == torch.tensor([False, True]))
ts_lof.save(tmp_path / 'lof.pt')
lof_detector = torch.load(tmp_path / 'lof.pt')
y = lof_detector(x)
assert torch.all(y == torch.tensor([False, True]))
| 10,349 | 37.333333 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__knn/test__knn.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od._knn import KNN
from alibi_detect.od.pytorch.ensemble import AverageAggregator, TopKAggregator, MaxAggregator, \
MinAggregator, ShiftAndScaleNormalizer, PValNormalizer
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
from sklearn.datasets import make_moons
def make_knn_detector(k=5, aggregator=None, normalizer=None):
knn_detector = KNN(
k=k, aggregator=aggregator,
normalizer=normalizer
)
x_ref = np.random.randn(100, 2)
knn_detector.fit(x_ref)
knn_detector.infer_threshold(x_ref, 0.1)
return knn_detector
def test_unfitted_knn_single_score():
knn_detector = KNN(k=10)
x = np.array([[0, 10], [0.1, 0]])
x_ref = np.random.randn(100, 2)
# test infer_threshold raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = knn_detector.infer_threshold(x_ref, 0.1)
assert str(err.value) == 'KNN has not been fit!'
# test score raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = knn_detector.score(x)
assert str(err.value) == 'KNN has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
_ = knn_detector.predict(x)
assert str(err.value) == 'KNN has not been fit!'
def test_fitted_knn_score():
"""
Test fitted but not threshold inferred non-ensemble detectors can still score data using the predict method.
Unlike the ensemble detectors, the non-ensemble detectors do not require the ensembler to be fit in the
infer_threshold method. See the test_fitted_knn_ensemble_score test for the ensemble case.
"""
knn_detector = KNN(k=10)
x_ref = np.random.randn(100, 2)
knn_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
y = knn_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 1
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
def test_fitted_knn_ensemble_score():
"""
Test fitted but not threshold inferred ensemble detectors correctly raise an error when calling
the predict method. This is because the ensembler is fit in the infer_threshold method.
"""
knn_detector = KNN(k=[10, 14, 18])
x_ref = np.random.randn(100, 2)
knn_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
with pytest.raises(ThresholdNotInferredError):
knn_detector.predict(x)
with pytest.raises(ThresholdNotInferredError):
knn_detector.score(x)
def test_incorrect_knn_ensemble_init():
# test knn ensemble with aggregator passed as None raises exception
with pytest.raises(ValueError) as err:
KNN(k=[8, 9, 10], aggregator=None)
assert str(err.value) == ('If `k` is a `np.ndarray`, `list` or `tuple`, '
'the `aggregator` argument cannot be ``None``.')
def test_fitted_knn_predict():
"""
Test that a detector fitted on data and with threshold inferred correctly, will score
    and label outliers, as well as return the p-values using the predict method. Also check
that the score method gives the same results.
"""
knn_detector = make_knn_detector(k=10)
x_ref = np.random.randn(100, 2)
knn_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10], [0, 0.1]])
y = knn_detector.predict(x)
y = y['data']
scores = knn_detector.score(x)
assert np.all(y['instance_score'] == scores)
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 1
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_unfitted_knn_ensemble(aggregator, normalizer):
knn_detector = KNN(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0.1, 0]])
# Test unfit knn ensemble raises exception when calling predict method.
with pytest.raises(NotFittedError) as err:
_ = knn_detector.predict(x)
assert str(err.value) == 'KNN has not been fit!'
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_knn_ensemble(aggregator, normalizer):
knn_detector = KNN(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x_ref = np.random.randn(100, 2)
knn_detector.fit(x_ref)
x = np.array([[0, 10], [0, 0.1]])
    # test the ensemble raises ThresholdNotInferredError if it has only been fit, the threshold
    # has not been inferred and the normalizer is not None.
if normalizer() is not None:
with pytest.raises(ThresholdNotInferredError):
knn_detector.predict(x)
else:
knn_detector.predict(x)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_fitted_knn_ensemble_predict(aggregator, normalizer):
knn_detector = make_knn_detector(
k=[8, 9, 10],
aggregator=aggregator(),
normalizer=normalizer()
)
x = np.array([[0, 10], [0, 0.1]])
# test fitted detectors with inferred thresholds can score data using the predict method.
y = knn_detector.predict(x)
y = y['data']
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
# test fitted detectors with inferred thresholds can score data using the score method.
scores = knn_detector.score(x)
assert np.all(y['instance_score'] == scores)
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None])
def test_knn_ensemble_torch_script(aggregator, normalizer):
knn_detector = make_knn_detector(k=[5, 6, 7], aggregator=aggregator(), normalizer=normalizer())
tsknn = torch.jit.script(knn_detector.backend)
x = torch.tensor([[0, 10], [0, 0.1]])
    # test the torchscripted ensemble knn detector correctly scores and flags outliers.
y = tsknn(x)
assert torch.all(y == torch.tensor([True, False]))
def test_knn_single_torchscript():
knn_detector = make_knn_detector(k=5)
tsknn = torch.jit.script(knn_detector.backend)
x = torch.tensor([[0, 10], [0, 0.1]])
    # test the torchscripted single knn detector correctly scores and flags outliers.
y = tsknn(x)
assert torch.all(y == torch.tensor([True, False]))
@pytest.mark.parametrize("aggregator", [AverageAggregator, lambda: TopKAggregator(k=7),
MaxAggregator, MinAggregator, lambda: 'AverageAggregator',
lambda: 'TopKAggregator', lambda: 'MaxAggregator',
lambda: 'MinAggregator'])
@pytest.mark.parametrize("normalizer", [ShiftAndScaleNormalizer, PValNormalizer, lambda: None,
lambda: 'ShiftAndScaleNormalizer', lambda: 'PValNormalizer'])
def test_knn_ensemble_integration(tmp_path, aggregator, normalizer):
"""Test knn ensemble detector on moons dataset.
Tests ensemble knn detector with every combination of aggregator and normalizer on the moons dataset.
Fits and infers thresholds in each case. Verifies that the detector can correctly detect inliers
    and outliers and that it can be serialized using torchscript.
"""
knn_detector = KNN(
k=[10, 14, 18],
aggregator=aggregator(),
normalizer=normalizer()
)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
knn_detector.fit(X_ref)
knn_detector.infer_threshold(X_ref, 0.1)
result = knn_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = knn_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_knn = torch.jit.script(knn_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_knn(x)
assert torch.all(y == torch.tensor([False, True]))
ts_knn.save(tmp_path / 'knn.pt')
knn_detector = torch.load(tmp_path / 'knn.pt')
y = knn_detector(x)
assert torch.all(y == torch.tensor([False, True]))
def test_knn_integration(tmp_path):
"""Test knn detector on moons dataset.
Tests knn detector on the moons dataset. Fits and infers thresholds and verifies that the detector can
    correctly detect inliers and outliers. Checks that it can be serialized using torchscript.
"""
knn_detector = KNN(k=18)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
knn_detector.fit(X_ref)
knn_detector.infer_threshold(X_ref, 0.1)
result = knn_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = knn_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_knn = torch.jit.script(knn_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_knn(x)
assert torch.all(y == torch.tensor([False, True]))
ts_knn.save(tmp_path / 'knn.pt')
knn_detector = torch.load(tmp_path / 'knn.pt')
y = knn_detector(x)
assert torch.all(y == torch.tensor([False, True]))
| 10,379 | 37.161765 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__knn/test__knn_backend.py
|
import pytest
import torch
from alibi_detect.od.pytorch.knn import KNNTorch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.ensemble import Ensembler, PValNormalizer, AverageAggregator
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.fixture(scope='function')
def ensembler(request):
return Ensembler(
normalizer=PValNormalizer(),
aggregator=AverageAggregator()
)
def test_knn_torch_backend():
"""
Test the knn torch backend can be correctly initialized, fit and used to
predict outliers.
"""
knn_torch = KNNTorch(k=5)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
outputs = knn_torch.predict(x)
assert outputs.instance_score.shape == (3, )
assert outputs.is_outlier is None
assert outputs.p_value is None
scores = knn_torch.score(x)
assert torch.all(scores == outputs.instance_score)
knn_torch.infer_threshold(x_ref, 0.1)
outputs = knn_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(knn_torch(x) == torch.tensor([False, False, True]))
def test_knn_torch_backend_ensemble(ensembler):
"""
Test the knn torch backend can be correctly initialized as an ensemble, fit
on data and used to predict outliers.
"""
knn_torch = KNNTorch(k=[4, 5], ensembler=ensembler)
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
knn_torch.infer_threshold(x_ref, 0.1)
outputs = knn_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(knn_torch(x) == torch.tensor([False, False, True]))
def test_knn_torch_backend_ensemble_ts(tmp_path, ensembler):
"""
Test the knn torch backend can be initialized as an ensemble and
torchscripted, as well as saved and loaded to and from disk.
"""
knn_torch = KNNTorch(k=[4, 5], ensembler=ensembler)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
knn_torch.infer_threshold(x_ref, 0.1)
pred_1 = knn_torch(x)
knn_torch = torch.jit.script(knn_torch)
pred_2 = knn_torch(x)
assert torch.all(pred_1 == pred_2)
knn_torch.save(tmp_path / 'knn_torch.pt')
knn_torch = torch.load(tmp_path / 'knn_torch.pt')
pred_2 = knn_torch(x)
assert torch.all(pred_1 == pred_2)
def test_knn_torch_backend_ts(tmp_path):
"""
Test the knn torch backend can be initialized and torchscripted, as well as
saved and loaded to and from disk.
"""
knn_torch = KNNTorch(k=7)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
knn_torch.infer_threshold(x_ref, 0.1)
pred_1 = knn_torch(x)
knn_torch = torch.jit.script(knn_torch)
pred_2 = knn_torch(x)
assert torch.all(pred_1 == pred_2)
knn_torch.save(tmp_path / 'knn_torch.pt')
knn_torch = torch.load(tmp_path / 'knn_torch.pt')
pred_2 = knn_torch(x)
assert torch.all(pred_1 == pred_2)
def test_knn_kernel(ensembler):
"""
Test the knn torch backend can be correctly initialized with a kernel, fit
on data and used to predict outliers.
"""
kernel = GaussianRBF(sigma=torch.tensor((0.25)))
knn_torch = KNNTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
knn_torch.infer_threshold(x_ref, 0.1)
outputs = knn_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(knn_torch(x) == torch.tensor([False, False, True]))
@pytest.mark.skip(reason="Can't convert GaussianRBF to torchscript due to torchscript type constraints")
def test_knn_kernel_ts(ensembler):
"""
Test the knn torch backend can be correctly initialized with a kernel,
and torchscripted, as well as saved and loaded to and from disk.
"""
kernel = GaussianRBF(sigma=torch.tensor((0.25)))
knn_torch = KNNTorch(k=[4, 5], kernel=kernel, ensembler=ensembler)
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
knn_torch.infer_threshold(x_ref, 0.1)
pred_1 = knn_torch(x)
knn_torch = torch.jit.script(knn_torch)
pred_2 = knn_torch(x)
assert torch.all(pred_1 == pred_2)
def test_knn_torch_backend_ensemble_fit_errors(ensembler):
"""Tests the correct errors are raised when using the KNNTorch backend as an ensemble."""
knn_torch = KNNTorch(k=[4, 5], ensembler=ensembler)
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
knn_torch(x)
assert str(err.value) == 'KNNTorch has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
knn_torch.predict(x)
assert str(err.value) == 'KNNTorch has not been fit!'
# Test the backend updates fitted flag on fit.
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
assert knn_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
    # threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
knn_torch(x)
assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
    # Test that the backend predict method also raises an error when the threshold has not
    # been inferred, since the ensembler is only fit during threshold inference.
with pytest.raises(ThresholdNotInferredError) as err:
knn_torch.predict(x)
assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
def test_knn_torch_backend_fit_errors():
"""Tests the correct errors are raised when using the KNNTorch backend as a single detector."""
knn_torch = KNNTorch(k=4)
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
knn_torch(x)
assert str(err.value) == 'KNNTorch has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
knn_torch.predict(x)
assert str(err.value) == 'KNNTorch has not been fit!'
# Test the backend updates fitted flag on fit.
x_ref = torch.randn((1024, 10))
knn_torch.fit(x_ref)
assert knn_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
    # threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
knn_torch(x)
assert str(err.value) == 'KNNTorch has no threshold set, call `infer_threshold` to fit one!'
# Test that the backend can call predict without the threshold being inferred.
knn_torch.predict(x)
def test_knn_infer_threshold_value_errors():
"""Tests the correct errors are raised when using incorrect choice of fpr for the KNNTorch backend detector."""
knn_torch = KNNTorch(k=4)
x = torch.randn((1024, 10))
knn_torch.fit(x)
# fpr must be greater than 1/len(x) otherwise it excludes all points in the reference dataset
with pytest.raises(ValueError) as err:
knn_torch.infer_threshold(x, 1/1025)
assert str(err.value) == '`fpr` must be greater than `1/len(x)=0.0009765625`.'
# fpr must be between 0 and 1
with pytest.raises(ValueError) as err:
knn_torch.infer_threshold(x, 1.1)
assert str(err.value) == '`fpr` must be in `(0, 1)`.'
knn_torch.infer_threshold(x, 0.99)
knn_torch.infer_threshold(x, 1/1023)
| 7,995 | 35.180995 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__pca/test__pca.py
|
import pytest
import numpy as np
import torch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od._pca import PCA
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
def fit_PCA_detector(detector):
pca_detector = detector()
x_ref = np.random.randn(100, 3)
pca_detector.fit(x_ref)
pca_detector.infer_threshold(x_ref, 0.1)
return pca_detector
@pytest.mark.parametrize('detector', [
lambda: PCA(n_components=3),
lambda: PCA(n_components=3, kernel=GaussianRBF())
])
def test_unfitted_PCA_single_score(detector):
"""Test pca detector throws errors when not fitted."""
pca = detector()
x = np.array([[0, 10, 11], [0.1, 0, 11]])
x_ref = np.random.randn(100, 3)
# test infer_threshold raises exception when not fitted
with pytest.raises(NotFittedError) as err:
pca.infer_threshold(x_ref, 0.1)
assert str(err.value) == \
f'{pca.__class__.__name__} has not been fit!'
# test score raises exception when not fitted
with pytest.raises(NotFittedError) as err:
pca.score(x)
assert str(err.value) == \
f'{pca.__class__.__name__} has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
pca.predict(x)
assert str(err.value) == \
f'{pca.__class__.__name__} has not been fit!'
def test_pca_value_errors():
with pytest.raises(ValueError) as err:
PCA(n_components=0)
assert str(err.value) == 'n_components must be at least 1'
with pytest.raises(ValueError) as err:
pca = PCA(n_components=4)
pca.fit(np.random.randn(100, 3))
assert str(err.value) == 'n_components must be less than the number of features.'
with pytest.raises(ValueError) as err:
pca = PCA(n_components=10, kernel=GaussianRBF())
pca.fit(np.random.randn(9, 3))
assert str(err.value) == 'n_components must be less than the number of reference instances.'
@pytest.mark.parametrize('detector', [
lambda: PCA(n_components=2),
lambda: PCA(n_components=2, kernel=GaussianRBF())
])
def test_fitted_PCA_score(detector):
"""Test Linear and Kernel PCA detector score method.
    Test that a Linear or Kernel PCA detector that has been fitted on reference data but has not had a
    threshold inferred can still score data using the predict method. Test that it does not raise an error
and does not return `threshold`, `p_value` and `is_outlier` values.
"""
pca_detector = detector()
x_ref = np.random.randn(100, 3)
pca_detector.fit(x_ref)
x = np.array([[0, 10, 0], [0.1, 0, 0]])
y = pca_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > y['instance_score'][1]
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
@pytest.mark.parametrize('detector', [
lambda: PCA(n_components=2),
lambda: PCA(n_components=2, kernel=GaussianRBF())
])
def test_fitted_PCA_predict(detector):
"""Test Linear and Kernel PCA detector predict method.
    Test that a Linear or Kernel PCA detector that has been fitted on reference data and has had a
    threshold inferred can score data using the predict method. Test that it does not raise an error and does
return `threshold`, `p_value` and `is_outlier` values.
"""
pca_detector = fit_PCA_detector(detector)
x_ref = np.random.randn(100, 3)
pca_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10, 0], [0.1, 0, 0]])
y = pca_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > y['instance_score'][1]
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
def test_PCA_integration(tmp_path):
"""Test Linear PCA detector on moons dataset.
Test the Linear PCA detector on a more complex 2d example. Test that the detector can be fitted
on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
Test that the detector can be scripted.
"""
pca_detector = PCA(n_components=1)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
pca_detector.fit(X_ref)
pca_detector.infer_threshold(X_ref, 0.1)
result = pca_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[0, -3]])
result = pca_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_PCA = torch.jit.script(pca_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_PCA(x)
assert torch.all(y == torch.tensor([False, True]))
ts_PCA.save(tmp_path / 'pca.pt')
pca_detector = PCA(n_components=1)
pca_detector = torch.load(tmp_path / 'pca.pt')
y = pca_detector(x)
assert torch.all(y == torch.tensor([False, True]))
def test_kernel_PCA_integration():
"""Test kernel PCA detector on moons dataset.
Test the kernel PCA detector on a more complex 2d example. Test that the detector can be fitted
on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
"""
pca_detector = PCA(n_components=10, kernel=GaussianRBF())
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
pca_detector.fit(X_ref)
pca_detector.infer_threshold(X_ref, 0.1)
result = pca_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[1, 1]])
result = pca_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
@pytest.mark.skip(reason='GaussianRBF kernel does not have torchscript support yet.')
def test_kernel_PCA_integration_ts():
"""Test the kernel PCA detector can be scripted."""
pca_detector = PCA(n_components=10, kernel=GaussianRBF())
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
pca_detector.fit(X_ref)
pca_detector.infer_threshold(X_ref, 0.1)
x_outlier = np.array([[1, 1]])
ts_PCA = torch.jit.script(pca_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_PCA(x)
assert torch.all(y == torch.tensor([False, True]))
| 6,603 | 35.893855 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__pca/test__pca_backend.py
|
import pytest
import torch
import numpy as np
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.pca import LinearPCATorch, KernelPCATorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.mark.parametrize('backend_detector', [
lambda: LinearPCATorch(n_components=5),
lambda: KernelPCATorch(n_components=5, kernel=GaussianRBF())
])
def test_pca_torch_backend_fit_errors(backend_detector):
"""Test Linear and Kernel PCA detector backend fit errors.
Test that an unfit detector backend raises an error when calling predict or score. Test that the
detector backend raises an error when calling the forward method while the threshold has not been
inferred.
"""
pca_torch = backend_detector()
assert not pca_torch.fitted
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
pca_torch(x)
assert str(err.value) == f'{pca_torch.__class__.__name__} has not been fit!'
with pytest.raises(NotFittedError) as err:
pca_torch.predict(x)
assert str(err.value) == f'{pca_torch.__class__.__name__} has not been fit!'
x_ref = torch.randn((1024, 10))
pca_torch.fit(x_ref)
assert pca_torch.fitted
with pytest.raises(ThresholdNotInferredError) as err:
pca_torch(x)
assert str(err.value) == (f'{pca_torch.__class__.__name__} has no threshold set, '
'call `infer_threshold` to fit one!')
assert pca_torch.predict(x)
@pytest.mark.parametrize('backend_detector', [
lambda: LinearPCATorch(n_components=1),
lambda: KernelPCATorch(n_components=1, kernel=GaussianRBF())
])
def test_pca_scoring(backend_detector):
"""Test Linear and Kernel PCATorch detector backend scoring methods.
Test that the detector correctly detects true outliers and that the correct proportion of in
distribution data is flagged as outliers.
"""
pca_torch = backend_detector()
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
pca_torch.fit(x_ref)
x_1 = torch.tensor([[8., 8.]], dtype=torch.float64)
scores_1 = pca_torch.score(x_1)
x_2 = torch.tensor([[10., 8.]], dtype=torch.float64)
scores_2 = pca_torch.score(x_2)
x_3 = torch.tensor([[8., 20.]], dtype=torch.float64)
scores_3 = pca_torch.score(x_3)
# test correct ordering of scores given outlyingness of data
assert scores_1 < scores_2 < scores_3
# test that detector correctly detects true Outlier
pca_torch.infer_threshold(x_ref, 0.01)
x = torch.cat((x_1, x_2, x_3))
outputs = pca_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(pca_torch(x) == torch.tensor([False, False, True]))
# test that 0.01 of the in distribution data is flagged as outliers
x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
outputs = pca_torch.predict(x)
assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.005
def test_pca_linear_torch_backend_ts(tmp_path):
"""Test Linear PCA detector backend is torch-scriptable and savable."""
pca_torch = LinearPCATorch(n_components=5)
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
pca_torch.fit(x_ref)
pca_torch.infer_threshold(x_ref, 0.1)
pred_1 = pca_torch(x)
pca_torch = torch.jit.script(pca_torch)
pred_2 = pca_torch(x)
assert torch.all(pred_1 == pred_2)
pca_torch.save(tmp_path / 'pca_torch.pt')
pca_torch = torch.load(tmp_path / 'pca_torch.pt')
pred_2 = pca_torch(x)
assert torch.all(pred_1 == pred_2)
@pytest.mark.skip(reason='GaussianRBF kernel does not have torchscript support yet.')
def test_pca_kernel_torch_backend_ts(tmp_path):
"""Test Kernel PCA detector backend is torch-scriptable and savable."""
pca_torch = KernelPCATorch(n_components=5, kernel=GaussianRBF())
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
pca_torch.fit(x_ref)
pca_torch.infer_threshold(x_ref, 0.1)
pred_1 = pca_torch(x)
pca_torch = torch.jit.script(pca_torch)
pred_2 = pca_torch(x)
assert torch.all(pred_1 == pred_2)
pca_torch.save(tmp_path / 'pca_torch.pt')
pca_torch = torch.load(tmp_path / 'pca_torch.pt')
pred_2 = pca_torch(x)
assert torch.all(pred_1 == pred_2)
| 4,470 | 34.768 | 101 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__svm/test__svm_pytorch_backend.py
|
import pytest
import numpy as np
import torch
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.od.pytorch.svm import BgdSVMTorch, SgdSVMTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_scoring(backend_cls):
"""Test SVM detector pytorch scoring method.
Tests the scoring method of the SVMTorch pytorch backend detector.
"""
sigma = torch.tensor(2)
svm_torch = backend_cls(
n_components=100,
kernel=GaussianRBF(sigma=sigma),
nu=0.1
)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
svm_torch.fit(x_ref)
x_1 = torch.tensor(np.array([[8., 8.]]))
scores_1 = svm_torch.score(x_1)
x_2 = torch.tensor(np.array([[13., 13.]]))
scores_2 = svm_torch.score(x_2)
x_3 = torch.tensor(np.array([[-100., 100.]]))
scores_3 = svm_torch.score(x_3)
# test correct ordering of scores given relative outlyingness of data
assert scores_1 < scores_2 < scores_3
# test that detector correctly detects true outlier
svm_torch.infer_threshold(x_ref, 0.01)
x = torch.cat((x_1, x_2, x_3))
outputs = svm_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, True, True]))
assert torch.all(svm_torch(x) == torch.tensor([False, True, True]))
# test that 0.01 of the in distribution data is flagged as outliers
x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
outputs = svm_torch.predict(x)
assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.01
@pytest.mark.skip(reason="Can't convert GaussianRBF to torchscript due to torchscript type constraints")
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_torch_backend_ts(tmp_path, backend_cls):
"""Test SVM detector backend is torch-scriptable and savable."""
svm_torch = backend_cls(n_components=10, kernel=GaussianRBF())
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
svm_torch.fit(x_ref, nu=0.01)
svm_torch.infer_threshold(x_ref, 0.1)
pred_1 = svm_torch(x)
svm_torch = torch.jit.script(svm_torch)
pred_2 = svm_torch(x)
assert torch.all(pred_1 == pred_2)
svm_torch.save(tmp_path / 'svm_torch.pt')
svm_torch = torch.load(tmp_path / 'svm_torch.pt')
pred_2 = svm_torch(x)
assert torch.all(pred_1 == pred_2)
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_backend_fit_errors(backend_cls):
"""Test SVM detector pytorch backend fit errors.
Tests the correct errors are raised when using the SVMTorch pytorch backend detector.
"""
svm_torch = backend_cls(n_components=100, kernel=GaussianRBF(), nu=0.1)
assert not svm_torch.fitted
# Test that the backend raises an error if it is not fitted before
# calling forward method.
x = torch.tensor(np.random.randn(1, 10))
with pytest.raises(NotFittedError) as err:
svm_torch(x)
assert str(err.value) == f'{backend_cls.__name__} has not been fit!'
# Test that the backend raises an error if it is not fitted before
# predicting.
with pytest.raises(NotFittedError) as err:
svm_torch.predict(x)
assert str(err.value) == f'{backend_cls.__name__} has not been fit!'
# Test the backend updates _fitted flag on fit.
x_ref = torch.tensor(np.random.randn(1024, 10))
svm_torch.fit(x_ref)
assert svm_torch.fitted
    # Test that the backend raises an error if the forward method is called without the
    # threshold being inferred.
with pytest.raises(ThresholdNotInferredError) as err:
svm_torch(x)
assert str(err.value) == f'{backend_cls.__name__} has no threshold set, call `infer_threshold` to fit one!'
# Test that the backend can call predict without the threshold being inferred.
assert svm_torch.predict(x)
@pytest.mark.parametrize('backend_cls', [BgdSVMTorch, SgdSVMTorch])
def test_svm_pytorch_fit(backend_cls):
"""Test SVM detector pytorch fit method.
    Tests that the pytorch detector checks for convergence and stops early if it does.
"""
kernel = GaussianRBF(torch.tensor(1.))
svm_torch = backend_cls(n_components=1, kernel=kernel, nu=0.01)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
fit_results = svm_torch.fit(x_ref, tol=0.01)
assert fit_results['converged']
assert fit_results['n_iter'] < 100
assert fit_results.get('lower_bound', 0) < 1
def test_sgd_bgd_diffs():
n_components = 300
bgd_svm = BgdSVMTorch(n_components=n_components, kernel=GaussianRBF(sigma=torch.tensor(2)), nu=0.05)
sgd_svm = SgdSVMTorch(n_components=n_components, kernel=GaussianRBF(sigma=torch.tensor(2)), nu=0.05)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
bgd_svm.fit(x_ref)
sgd_svm.fit(x_ref)
test_x = x_ref[:1000]
diffs = (sgd_svm.score(test_x) - bgd_svm.score(test_x)).numpy()
assert np.abs(diffs.mean()) < 0.1
| 5,250 | 36.241135 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__svm/test__svm.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od._svm import SVM
from alibi_detect.exceptions import NotFittedError
from alibi_detect.utils.pytorch import GaussianRBF
from sklearn.datasets import make_moons
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
def test_unfitted_svm_score(optimization):
"""Test SVM detector raises exceptions when not fitted."""
svm_detector = SVM(
n_components=10,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization=optimization,
nu=0.1
)
x = np.array([[0, 10], [0.1, 0]])
x_ref = np.random.randn(100, 2)
with pytest.raises(NotFittedError) as err:
svm_detector.infer_threshold(x_ref, 0.1)
assert str(err.value) == 'SVM has not been fit!'
with pytest.raises(NotFittedError) as err:
svm_detector.score(x)
assert str(err.value) == 'SVM has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
svm_detector.predict(x)
assert str(err.value) == 'SVM has not been fit!'
@pytest.mark.parametrize('optimization,device', [('sgd', 'gpu'), ('bgd', 'cpu')])
def test_svm_device_warnings(optimization, device):
"""Test SVM detector device warnings."""
warning_msgs = {
'sgd': ('If using the `sgd` optimization option with GPU then only the Nystroem approximation'
' portion of the method will utilize the GPU. Consider using the `bgd` option which will'
' run everything on the GPU.'),
'bgd': ('The `bgd` optimization option is best suited for GPU. '
'If you want to use CPU, consider using the `sgd` option.')
}
with pytest.warns(UserWarning) as warning:
_ = SVM(
n_components=10,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization=optimization,
device=device,
nu=0.1
)
assert len(warning) == 1
assert str(warning[0].message) == warning_msgs[optimization]
def test_svm_optimization_error():
"""Test SVM detector raises correct errors for wrong optimization kwargs."""
with pytest.raises(ValueError) as err:
_ = SVM(
n_components=10,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization='not_an_option',
device='cpu',
nu=0.1
)
assert str(err.value) == 'Optimization not_an_option not recognized. Choose from `sgd` or `bgd`.'
def test_svm_n_components_error():
"""Test SVM detector raises correct errors for wrong value of n_components."""
with pytest.raises(ValueError) as err:
_ = SVM(
n_components=0,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization='bgd',
device='cpu',
nu=0.1
)
assert str(err.value) == 'n_components must be a positive integer, got 0.'
@pytest.mark.parametrize('optimization', [('sgd'), ('bgd')])
def test_fitted_svm_score(optimization):
"""Test SVM detector score method.
    Test that an SVM detector that has been fitted on reference data but has not had a threshold
inferred can still score data using the predict method. Test that it does not raise an error
but does not return `threshold`, `p_value` and `is_outlier` values.
"""
svm_detector = SVM(
n_components=10,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization=optimization,
nu=0.1
)
x_ref = np.random.randn(100, 2)
svm_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
scores = svm_detector.score(x)
y = svm_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > -0.01
assert y['instance_score'][1] < -0.8
assert all(y['instance_score'] == scores)
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
@pytest.mark.parametrize('optimization', [('sgd'), ('bgd')])
def test_fitted_svm_predict(optimization):
"""Test SVM detector predict method.
    Test that an SVM detector that has been fitted on reference data and has had a threshold
inferred can score data using the predict method as well as predict outliers. Test that it
returns `threshold`, `p_value` and `is_outlier` values.
"""
svm_detector = SVM(
n_components=10,
backend='pytorch',
kernel=GaussianRBF(torch.tensor(2)),
optimization=optimization,
nu=0.1
)
x_ref = np.random.randn(100, 2)
svm_detector.fit(x_ref)
svm_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10], [0, 0.1]])
y = svm_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > -0.01
assert y['instance_score'][1] < -0.8
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
@pytest.mark.parametrize('n_components', [None, 100])
@pytest.mark.parametrize('kernel', [None, GaussianRBF(torch.tensor(2))])
def test_svm_integration(optimization, n_components, kernel):
"""Test SVM detector on moons dataset.
Test SVM detector on a more complex 2d example. Test that the detector can be fitted
on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
"""
svm_detector = SVM(
n_components=n_components,
nu=0.1,
backend='pytorch',
kernel=kernel,
optimization=optimization,
)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
svm_detector.fit(X_ref)
svm_detector.infer_threshold(X_ref, 0.1)
result = svm_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = svm_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
@pytest.mark.skip(reason="Can't convert default kernel GaussianRBF to torchscript due to torchscript type constraints")
def test_svm_torchscript(tmp_path):
"""Tests user can torch-script svm detector."""
sigma = torch.tensor(0.2)
svm_detector = SVM(
n_components=100,
backend='pytorch',
kernel=GaussianRBF(sigma=sigma)
)
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
svm_detector.fit(X_ref, nu=0.1)
svm_detector.infer_threshold(X_ref, 0.1)
x_outlier = np.array([[-1, 1.5]])
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
ts_svm = torch.jit.script(svm_detector.backend)
y = ts_svm(x)
assert torch.all(y == torch.tensor([False, True]))
ts_svm.save(tmp_path / 'svm.pt')
ts_svm = torch.load(tmp_path / 'svm.pt')
y = ts_svm(x)
assert torch.all(y == torch.tensor([False, True]))
@pytest.mark.parametrize('optimization', ['sgd', 'bgd'])
def test_svm_fit(optimization):
"""Test SVM detector fit method.
    Tests that the pytorch detector checks for convergence and stops early if it does.
"""
kernel = GaussianRBF(torch.tensor(1.))
svm = SVM(
n_components=10,
kernel=kernel,
nu=0.01,
optimization=optimization,
)
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
fit_results = svm.fit(x_ref, tol=0.01)
assert fit_results['converged']
assert fit_results['n_iter'] < 100
assert fit_results.get('lower_bound', 0) < 1
# 'sgd' optimization does not return lower bound
if optimization == 'bgd':
assert isinstance(fit_results['lower_bound'], float)
| 7,983 | 32.974468 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__mahalanobis/test__mahalanobis.py
|
import pytest
import numpy as np
import torch
from alibi_detect.od._mahalanobis import Mahalanobis
from alibi_detect.exceptions import NotFittedError
from sklearn.datasets import make_moons
def make_mahalanobis_detector():
mahalanobis_detector = Mahalanobis()
x_ref = np.random.randn(100, 2)
mahalanobis_detector.fit(x_ref)
mahalanobis_detector.infer_threshold(x_ref, 0.1)
return mahalanobis_detector
def test_unfitted_mahalanobis_single_score():
"""Test Mahalanobis detector throws errors when not fitted."""
mahalanobis_detector = Mahalanobis()
x = np.array([[0, 10], [0.1, 0]])
x_ref = np.random.randn(100, 2)
with pytest.raises(NotFittedError) as err:
mahalanobis_detector.infer_threshold(x_ref, 0.1)
assert str(err.value) == 'Mahalanobis has not been fit!'
with pytest.raises(NotFittedError) as err:
mahalanobis_detector.score(x)
assert str(err.value) == 'Mahalanobis has not been fit!'
# test predict raises exception when not fitted
with pytest.raises(NotFittedError) as err:
mahalanobis_detector.predict(x)
assert str(err.value) == 'Mahalanobis has not been fit!'
def test_fitted_mahalanobis_score():
"""Test Mahalanobis detector score method.
    Test that a Mahalanobis detector that has been fitted on reference data but has not had a threshold
inferred can still score data using the predict method. Test that it does not raise an error
but does not return `threshold`, `p_value` and `is_outlier` values.
"""
mahalanobis_detector = Mahalanobis()
x_ref = np.random.randn(100, 2)
mahalanobis_detector.fit(x_ref)
x = np.array([[0, 10], [0.1, 0]])
scores = mahalanobis_detector.score(x)
y = mahalanobis_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 1
assert all(y['instance_score'] == scores)
assert not y['threshold_inferred']
assert y['threshold'] is None
assert y['is_outlier'] is None
assert y['p_value'] is None
def test_fitted_mahalanobis_predict():
"""Test Mahalanobis detector predict method.
    Test that a Mahalanobis detector that has been fitted on reference data and has had a threshold
inferred can score data using the predict method as well as predict outliers. Test that it
returns `threshold`, `p_value` and `is_outlier` values.
"""
mahalanobis_detector = make_mahalanobis_detector()
x_ref = np.random.randn(100, 2)
mahalanobis_detector.infer_threshold(x_ref, 0.1)
x = np.array([[0, 10], [0, 0.1]])
y = mahalanobis_detector.predict(x)
y = y['data']
assert y['instance_score'][0] > 5
assert y['instance_score'][1] < 1
assert y['threshold_inferred']
assert y['threshold'] is not None
assert y['p_value'].all()
assert (y['is_outlier'] == [True, False]).all()
def test_mahalanobis_integration(tmp_path):
"""Test Mahalanobis detector on moons dataset.
Test Mahalanobis detector on a more complex 2d example. Test that the detector can be fitted
on reference data and infer a threshold. Test that it differentiates between inliers and outliers.
Test that the detector can be scripted.
"""
mahalanobis_detector = Mahalanobis()
X_ref, _ = make_moons(1001, shuffle=True, noise=0.05, random_state=None)
X_ref, x_inlier = X_ref[0:1000], X_ref[1000][None]
mahalanobis_detector.fit(X_ref)
mahalanobis_detector.infer_threshold(X_ref, 0.1)
result = mahalanobis_detector.predict(x_inlier)
result = result['data']['is_outlier'][0]
assert not result
x_outlier = np.array([[-1, 1.5]])
result = mahalanobis_detector.predict(x_outlier)
result = result['data']['is_outlier'][0]
assert result
ts_mahalanobis = torch.jit.script(mahalanobis_detector.backend)
x = torch.tensor([x_inlier[0], x_outlier[0]], dtype=torch.float32)
y = ts_mahalanobis(x)
assert torch.all(y == torch.tensor([False, True]))
ts_mahalanobis.save(tmp_path / 'mahalanobis.pt')
mahalanobis_detector = torch.load(tmp_path / 'mahalanobis.pt')
y = mahalanobis_detector(x)
assert torch.all(y == torch.tensor([False, True]))
| 4,177 | 35.973451 | 102 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/tests/test__mahalanobis/test__mahalanobis_backend.py
|
import pytest
import torch
import numpy as np
from alibi_detect.od.pytorch.mahalanobis import MahalanobisTorch
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
def test_mahalanobis_linear_scoring():
"""Test Mahalanobis detector linear scoring method.
Test that the Mahalanobis detector `_compute_linear_proj` method correctly whitens the x_ref data
and that the score method correctly orders different test points. Test that the detector correctly
detects true outliers and that the correct proportion of in distribution data is flagged as
outliers.
"""
mahalanobis_torch = MahalanobisTorch()
mean = [8, 8]
cov = [[2., 0.], [0., 1.]]
x_ref = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
mahalanobis_torch.fit(x_ref)
p = mahalanobis_torch._compute_linear_proj(mahalanobis_torch.x_ref)
    # test that the projection of x_ref is whitened (approximately zero mean and unit variance)
assert p.mean() < 0.1
assert p.std() - 1 < 0.1
x_1 = torch.tensor([[8., 8.]])
scores_1 = mahalanobis_torch.score(x_1)
x_2 = torch.tensor(np.random.multivariate_normal(mean, cov, 1))
scores_2 = mahalanobis_torch.score(x_2)
x_3 = torch.tensor([[-10., 10.]])
scores_3 = mahalanobis_torch.score(x_3)
# test correct ordering of scores given outlyingness of data
assert scores_1 < scores_2 < scores_3
# test that detector correctly detects true Outlier
mahalanobis_torch.infer_threshold(x_ref, 0.01)
x = torch.cat((x_1, x_2, x_3))
outputs = mahalanobis_torch.predict(x)
assert torch.all(outputs.is_outlier == torch.tensor([False, False, True]))
assert torch.all(mahalanobis_torch(x) == torch.tensor([False, False, True]))
# test that 0.01 of the in distribution data is flagged as outliers
x = torch.tensor(np.random.multivariate_normal(mean, cov, 1000))
outputs = mahalanobis_torch.predict(x)
assert (outputs.is_outlier.sum()/1000) - 0.01 < 0.005
def test_mahalanobis_torch_backend_ts(tmp_path):
"""Test Mahalanobis detector backend is torch-scriptable and savable."""
mahalanobis_torch = MahalanobisTorch()
x = torch.randn((3, 10)) * torch.tensor([[1], [1], [100]])
x_ref = torch.randn((1024, 10))
mahalanobis_torch.fit(x_ref)
mahalanobis_torch.infer_threshold(x_ref, 0.1)
pred_1 = mahalanobis_torch(x)
mahalanobis_torch = torch.jit.script(mahalanobis_torch)
pred_2 = mahalanobis_torch(x)
assert torch.all(pred_1 == pred_2)
mahalanobis_torch.save(tmp_path / 'mahalanobis_torch.pt')
mahalanobis_torch = torch.load(tmp_path / 'mahalanobis_torch.pt')
pred_2 = mahalanobis_torch(x)
assert torch.all(pred_1 == pred_2)
def test_mahalanobis_torch_backend_fit_errors():
"""Test Mahalanobis detector backend fit errors.
Test that an unfit detector backend raises an error when calling predict or score. Test that the
detector backend raises an error when calling the forward method while the threshold has not been
inferred.
"""
mahalanobis_torch = MahalanobisTorch()
assert not mahalanobis_torch.fitted
x = torch.randn((1, 10))
with pytest.raises(NotFittedError) as err:
mahalanobis_torch(x)
assert str(err.value) == 'MahalanobisTorch has not been fit!'
with pytest.raises(NotFittedError) as err:
mahalanobis_torch.predict(x)
assert str(err.value) == 'MahalanobisTorch has not been fit!'
x_ref = torch.randn((1024, 10))
mahalanobis_torch.fit(x_ref)
assert mahalanobis_torch.fitted
with pytest.raises(ThresholdNotInferredError) as err:
mahalanobis_torch(x)
assert str(err.value) == 'MahalanobisTorch has no threshold set, call `infer_threshold` to fit one!'
assert mahalanobis_torch.predict(x)
| 3,764 | 36.277228 | 104 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/sklearn/base.py
|
from typing import List, Union, Optional, Dict
from dataclasses import dataclass, asdict
from abc import ABC, abstractmethod
from typing_extensions import Self
import numpy as np
from alibi_detect.exceptions import NotFittedError, ThresholdNotInferredError
@dataclass
class SklearnOutlierDetectorOutput:
"""Output of the outlier detector."""
threshold_inferred: bool
instance_score: np.ndarray
threshold: Optional[np.ndarray]
is_outlier: Optional[np.ndarray]
p_value: Optional[np.ndarray]
class FitMixinSklearn(ABC):
fitted = False
@abstractmethod
def fit(self, x_ref: np.ndarray) -> Self:
"""Abstract fit method.
Parameters
----------
        x_ref
            `np.ndarray` to fit object on.
"""
return self
def _set_fitted(self) -> Self:
"""Sets the fitted attribute to True.
Should be called within the object fit method.
"""
self.fitted = True
return self
def check_fitted(self):
"""Checks to make sure object has been fitted.
Raises
------
NotFittedError
Raised if method called and object has not been fit.
"""
if not self.fitted:
raise NotFittedError(self.__class__.__name__)
class SklearnOutlierDetector(FitMixinSklearn, ABC):
"""Base class for sklearn backend outlier detection algorithms."""
threshold_inferred = False
threshold = None
@abstractmethod
def score(self, x: np.ndarray) -> np.ndarray:
"""Score the data.
Parameters
----------
x
Data to score.
"""
pass
def check_threshold_inferred(self):
"""Check if threshold is inferred.
Raises
------
ThresholdNotInferredError
Raised if threshold is not inferred.
"""
if not self.threshold_inferred:
raise ThresholdNotInferredError(self.__class__.__name__)
@staticmethod
def _to_frontend_dtype(
arg: Union[np.ndarray, SklearnOutlierDetectorOutput]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""Converts input to frontend data format.
This is an interface method that ensures that the output of the outlier detector is in a common format for
different backends. If `arg` is a `SklearnOutlierDetectorOutput` object, we unpack it into a `dict` and
return it.
Parameters
----------
        arg
Data to convert.
Returns
-------
`np.ndarray` or dictionary containing frontend compatible data.
"""
if isinstance(arg, SklearnOutlierDetectorOutput):
return asdict(arg)
return arg
@staticmethod
def _to_backend_dtype(x: Union[List, np.ndarray]) -> np.ndarray:
"""Converts data from the frontend to the backend format.
This is an interface method that ensures that the input of the chosen outlier detector backend is in the correct
format. In the case of the Sklearn backend, we ensure the data is a numpy array.
Parameters
----------
x
Data to convert.
"""
return np.asarray(x)
def _classify_outlier(self, scores: np.ndarray) -> Optional[np.ndarray]:
"""Classify the data as outlier or not.
Parameters
----------
scores
Scores to classify. Larger scores indicate more likely outliers.
Returns
-------
`np.ndarray` or ``None``
"""
if (self.threshold_inferred and self.threshold is not None):
return (scores > self.threshold).astype(int)
return None
def _p_vals(self, scores: np.ndarray) -> np.ndarray:
"""Compute p-values for the scores.
Parameters
----------
scores
Scores to compute p-values for.
Returns
-------
`np.ndarray` or ``None``
"""
return (1 + (scores[:, None] < self.val_scores).sum(-1))/len(self.val_scores) \
if self.threshold_inferred else None
def infer_threshold(self, x: np.ndarray, fpr: float) -> None:
"""Infer the threshold for the data. Prerequisite for outlier predictions.
Parameters
----------
x
Data to infer the threshold for.
fpr
False positive rate to use for threshold inference.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
ValueError
Raised if `fpr` is less than ``1/len(x)``.
"""
if not 0 < fpr < 1:
raise ValueError('`fpr` must be in `(0, 1)`.')
if fpr < 1/len(x):
raise ValueError(f'`fpr` must be greater than `1/len(x)={1/len(x)}`.')
self.val_scores = self.score(x)
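        # The threshold is the (1 - fpr) empirical quantile of the reference scores, e.g.
        # fpr=0.05 over 100 reference instances places it so that roughly 5 of them score above it.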
self.threshold = np.quantile(self.val_scores, 1-fpr, interpolation='higher') # type: ignore[call-overload]
self.threshold_inferred = True
def predict(self, x: np.ndarray) -> SklearnOutlierDetectorOutput:
"""Predict outlier labels for the data.
Computes the outlier scores. If the detector is not fit on reference data we raise an error.
If the threshold is inferred, the outlier labels and p-values are also computed and returned.
Otherwise, the outlier labels and p-values are set to ``None``.
Parameters
----------
x
Data to predict.
Returns
-------
`SklearnOutlierDetectorOutput`
Output of the outlier detector.
Raises
------
        NotFittedError
Raised if the detector is not fit on reference data.
"""
self.check_fitted()
scores = self.score(x)
return SklearnOutlierDetectorOutput(
instance_score=scores,
is_outlier=self._classify_outlier(scores),
p_value=self._p_vals(scores),
threshold_inferred=self.threshold_inferred,
threshold=self.threshold
)
def __call__(self, x: np.ndarray) -> np.ndarray:
"""Classify outliers.
Parameters
----------
x
Data to classify.
"""
scores = self.score(x)
self.check_threshold_inferred()
return self._classify_outlier(scores)
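if __name__ == '__main__':  # pragma: no cover
    # Hedged usage sketch (not part of the original module): a minimal concrete
    # subclass illustrating the fit -> infer_threshold -> predict workflow documented
    # above. The class name `_MeanDistanceDetector` and its toy scoring rule
    # (Euclidean distance from the reference mean) are illustrative assumptions only.
    class _MeanDistanceDetector(SklearnOutlierDetector):
        def fit(self, x_ref: np.ndarray) -> 'SklearnOutlierDetector':
            # Toy "model": remember the mean of the reference data.
            self.x_ref_mean = x_ref.mean(0)
            return self._set_fitted()
        def score(self, x: np.ndarray) -> np.ndarray:
            self.check_fitted()
            return np.linalg.norm(x - self.x_ref_mean, axis=-1)
    detector = _MeanDistanceDetector()
    x_ref = np.random.randn(100, 2)
    detector.fit(x_ref)
    detector.infer_threshold(x_ref, fpr=0.1)
    output = detector.predict(np.array([[0., 0.], [10., 10.]]))
    print(output.is_outlier, output.instance_score)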
| 6,383 | 28.018182 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/sklearn/gmm.py
|
import numpy as np
from typing import Dict
from sklearn.mixture import GaussianMixture
from alibi_detect.od.sklearn.base import SklearnOutlierDetector
class GMMSklearn(SklearnOutlierDetector):
def __init__(
self,
n_components: int,
):
"""sklearn backend for the Gaussian Mixture Model (GMM) outlier detector.
Parameters
----------
n_components
Number of components in gaussian mixture model.
Raises
------
ValueError
If `n_components` is less than 1.
"""
super().__init__()
if n_components < 1:
raise ValueError('n_components must be at least 1')
self.n_components = n_components
def fit( # type: ignore[override]
self,
x_ref: np.ndarray,
tol: float = 1e-3,
max_iter: int = 100,
n_init: int = 1,
init_params: str = 'kmeans',
verbose: int = 0,
) -> Dict:
"""Fit the SKLearn GMM model`.
Parameters
----------
x_ref
Reference data.
tol
Convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold.
max_iter
Maximum number of EM iterations to perform.
n_init
Number of initializations to perform.
init_params
Method used to initialize the weights, the means and the precisions. Must be one of:
'kmeans' : responsibilities are initialized using kmeans.
'kmeans++' : responsibilities are initialized using kmeans++.
'random' : responsibilities are initialized randomly.
'random_from_data' : responsibilities are initialized randomly from the data.
verbose
Enable verbose output. If 1 then it prints the current initialization and each iteration step. If greater
than 1 then it prints also the log probability and the time needed for each step.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys:
- converged: bool indicating whether EM algorithm converged.
- n_iter: number of EM iterations performed.
- lower_bound: log-likelihood lower bound.
"""
self.gmm = GaussianMixture(
n_components=self.n_components,
tol=tol,
max_iter=max_iter,
n_init=n_init,
init_params=init_params,
verbose=verbose,
)
self.gmm = self.gmm.fit(
x_ref,
)
self._set_fitted()
return {
'converged': self.gmm.converged_,
'n_iter': self.gmm.n_iter_,
'lower_bound': self.gmm.lower_bound_
}
def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
"""Format kwargs for `fit` method.
Parameters
----------
        fit_kwargs
            Dictionary of kwargs to format. See `fit` method for details.
Returns
-------
Formatted kwargs.
"""
return dict(
tol=fit_kwargs.get('tol', 1e-3),
max_iter=(lambda v: 100 if v is None else v)(fit_kwargs.get('max_epochs', None)),
n_init=fit_kwargs.get('n_init', 1),
init_params=fit_kwargs.get('init_params', 'kmeans'),
verbose=fit_kwargs.get('verbose', 0),
)
def score(self, x: np.ndarray) -> np.ndarray:
"""Computes the score of `x`
Parameters
----------
x
`np.ndarray` with leading batch dimension.
Returns
-------
`np.ndarray` of scores with leading batch dimension.
Raises
------
NotFittedError
Raised if method called and detector has not been fit.
"""
self.check_fitted()
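        # sklearn's `score_samples` returns per-instance log-likelihoods; negate them
        # so that larger scores correspond to more anomalous instances.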
return - self.gmm.score_samples(x)
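if __name__ == '__main__':  # pragma: no cover
    # Hedged usage sketch (not part of the original module): fit the sklearn GMM backend
    # on toy Gaussian reference data, infer a threshold at a 10% false positive rate and
    # score an obvious outlier. All data and parameter values here are illustrative only.
    backend = GMMSklearn(n_components=2)
    x_ref = np.random.randn(500, 2)
    fit_results = backend.fit(x_ref)
    print(fit_results['converged'], fit_results['n_iter'])
    backend.infer_threshold(x_ref, fpr=0.1)
    output = backend.predict(np.array([[0., 0.], [10., 10.]]))
    print(output.is_outlier, output.instance_score)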
| 3,928 | 30.432 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/sklearn/__init__.py
|
from alibi_detect.od.sklearn.gmm import GMMSklearn # noqa: F401
| 65 | 32 | 64 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/svm.py
|
import warnings
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
import torch
from sklearn.linear_model import SGDOneClassSVM
from sklearn.utils.extmath import safe_sparse_dot
from tqdm import tqdm
from typing_extensions import Literal, Self
from alibi_detect.od.pytorch.base import TorchOutlierDetector
from alibi_detect.utils.pytorch.losses import hinge_loss
from alibi_detect.utils.pytorch.kernels import GaussianRBF
class SVMTorch(TorchOutlierDetector):
ensemble = False
def __init__(
self,
nu: float,
kernel: 'torch.nn.Module' = None,
n_components: Optional[int] = None,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""Pytorch backend for the Support Vector Machine (SVM) outlier detector.
Parameters
----------
nu
The proportion of the training data that should be considered outliers. Note that this does
not necessarily correspond to the false positive rate on test data, which is still defined when
calling the `infer_threshold` method.
kernel
Kernel function to use for outlier detection.
n_components
Number of components in the Nystroem approximation, by default uses all of them.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
super().__init__(device=device)
self.n_components = n_components
if kernel is None:
kernel = GaussianRBF()
self.kernel = kernel
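        # The Nystroem map embeds inputs in an approximate kernel feature space in which
        # a linear one-class SVM is subsequently fit.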
self.nystroem = _Nystroem(
self.kernel,
self.n_components
)
self.nu = nu
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
ThresholdNotInferredException
If called before detector has had `infer_threshold` method called.
"""
scores = self.score(x)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
class SgdSVMTorch(SVMTorch):
ensemble = False
def __init__(
self,
nu: float,
kernel: 'torch.nn.Module' = None,
n_components: Optional[int] = None,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""SGD Optimization backend for the One class support vector machine (SVM) outlier detector.
Parameters
----------
nu
The proportion of the training data that should be considered outliers. Note that this does
not necessarily correspond to the false positive rate on test data, which is still defined when
calling the `infer_threshold` method.
kernel
Kernel function to use for outlier detection.
n_components
Number of components in the Nystroem approximation, by default uses all of them.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
if (isinstance(device, str) and device in ('gpu', 'cuda')) or \
(isinstance(device, torch.device) and device.type == 'cuda'):
warnings.warn(('If using the `sgd` optimization option with GPU then only the Nystroem approximation'
' portion of the method will utilize the GPU. Consider using the `bgd` option which will'
' run everything on the GPU.'))
super().__init__(
device=device,
n_components=n_components,
kernel=kernel,
nu=nu,
)
def fit( # type: ignore[override]
self,
x_ref: torch.Tensor,
tol: float = 1e-6,
max_iter: int = 1000,
verbose: int = 0,
) -> Dict:
"""Fit the Nystroem approximation and Sklearn `SGDOneClassSVM` SVM model.
Parameters
----------
x_ref
Training data.
tol
The decrease in loss required over the previous ``n_iter_no_change`` iterations in order to
continue optimizing.
max_iter
The maximum number of optimization steps.
verbose
Verbosity level during training. ``0`` is silent, ``1`` a progress bar.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys:
- converged: `bool` indicating whether training converged.
- n_iter: number of iterations performed.
"""
x_nys = self.nystroem.fit(x_ref).transform(x_ref)
self.svm = SGDOneClassSVM(
tol=tol,
max_iter=max_iter,
verbose=verbose,
nu=self.nu
)
x_nys = x_nys.cpu().numpy()
self.svm = self.svm.fit(x_nys)
self._set_fitted()
return {
'converged': self.svm.n_iter_ < max_iter,
'n_iter': self.svm.n_iter_,
}
def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
"""Format kwargs for `fit` method.
Parameters
----------
fit_kwargs
            Dictionary of kwargs to format. See `fit` method for details.
Returns
-------
Formatted kwargs.
"""
return dict(
tol=fit_kwargs.get('tol', 1e-3),
max_iter=fit_kwargs.get('max_iter', 1000),
verbose=fit_kwargs.get('verbose', 0),
)
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
        `torch.Tensor` of scores with leading batch dimension.
Raises
------
NotFittedError
Raised if method called and detector has not been fit.
"""
self.check_fitted()
x_nys = self.nystroem.transform(x)
x_nys = x_nys.cpu().numpy()
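        # Evaluate the linear decision function of the fitted `SGDOneClassSVM` in the
        # Nystroem feature space, with the coefficients rescaled by their squared norm;
        # the result is negated below so that larger scores indicate more anomalous points.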
coef_ = self.svm.coef_ / (self.svm.coef_ ** 2).sum()
x_nys = self.svm._validate_data(x_nys, accept_sparse="csr", reset=False)
result = safe_sparse_dot(x_nys, coef_.T, dense_output=True).ravel()
return - self._to_backend_dtype(result)
class BgdSVMTorch(SVMTorch):
ensemble = False
def __init__(
self,
nu: float,
kernel: 'torch.nn.Module' = None,
n_components: Optional[int] = None,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""Pytorch backend for the Support Vector Machine (SVM) outlier detector.
Parameters
----------
nu
The proportion of the training data that should be considered outliers. Note that this does
not necessarily correspond to the false positive rate on test data, which is still defined when
calling the `infer_threshold` method.
kernel
Kernel function to use for outlier detection.
n_components
Number of components in the Nystroem approximation, by default uses all of them.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
if (isinstance(device, str) and device == 'cpu') or \
(isinstance(device, torch.device) and device.type == 'cpu'):
warnings.warn(('The `bgd` optimization option is best suited for GPU. If '
'you want to use CPU, consider using the `sgd` option.'))
super().__init__(
device=device,
n_components=n_components,
kernel=kernel,
nu=nu,
)
def fit( # type: ignore[override]
self,
x_ref: torch.Tensor,
step_size_range: Tuple[float, float] = (1e-8, 1.0),
n_step_sizes: int = 16,
tol: float = 1e-6,
n_iter_no_change: int = 25,
max_iter: int = 1000,
verbose: int = 0,
) -> Dict:
"""Fit the Nystroem approximation and python SVM model.
Parameters
----------
x_ref
Training data.
step_size_range
The range of values to be considered for the gradient descent step size at each iteration. This is
specified as a tuple of the form `(min_eta, max_eta)`.
n_step_sizes
The number of step sizes in the defined range to be tested for loss reduction. This many points are spaced
equidistantly along the range in log space.
tol
The decrease in loss required over the previous n_iter_no_change iterations in order to continue optimizing.
n_iter_no_change
The number of iterations over which the loss must decrease by `tol` in order for optimization to continue.
max_iter
The maximum number of optimization steps.
verbose
Verbosity level during training. ``0`` is silent, ``1`` a progress bar.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys:
- converged: `bool` indicating whether training converged.
- n_iter: number of iterations performed.
- lower_bound: loss lower bound.
"""
x_nys = self.nystroem.fit(x_ref).transform(x_ref)
n, d = x_nys.shape
min_eta, max_eta = step_size_range
etas = torch.tensor(
np.linspace(
np.log(min_eta),
np.log(max_eta),
n_step_sizes
),
dtype=x_nys.dtype,
device=self.device
).exp()
# Initialise coeffs/preds/loss
coeffs = torch.zeros(d, dtype=x_nys.dtype, device=self.device)
intercept = torch.zeros(1, dtype=x_nys.dtype, device=self.device)
preds = x_nys @ coeffs + intercept
loss = self.nu * (coeffs.square().sum()/2 + intercept) + hinge_loss(preds)
min_loss, min_loss_coeffs, min_loss_intercept = loss, coeffs, intercept
iter, t_since_improv = 0, 0
converged = False
with tqdm(total=max_iter, disable=not verbose) as pbar:
while not converged:
pbar.update(1)
# First two lines give form of sgd update (for each candidate step size)
sup_vec_inds = (preds < 1)
cand_coeffs = coeffs[:, None] * \
(1-etas*self.nu) + etas*(x_nys[sup_vec_inds].sum(0)/n)[:, None]
cand_intercept = intercept - etas*self.nu + (sup_vec_inds.sum()/n)
# Compute loss for each candidate step size and choose the best
cand_preds = x_nys @ cand_coeffs + cand_intercept
cand_losses = self.nu * (cand_coeffs.square().sum(0)/2 + cand_intercept) + hinge_loss(cand_preds)
best_step_size = cand_losses.argmin()
coeffs, intercept = cand_coeffs[:, best_step_size], cand_intercept[best_step_size]
preds, loss = cand_preds[:, best_step_size], cand_losses[best_step_size]
# Keep track of best performing coefficients and time since improving (by more than tol)
if loss < min_loss:
if loss < min_loss - tol:
t_since_improv = 0
min_loss, min_loss_coeffs, min_loss_intercept = loss, coeffs, intercept
else:
t_since_improv += 1
# Decide whether to continue
if iter > max_iter or t_since_improv > n_iter_no_change:
self.coeffs = min_loss_coeffs
self.intercept = min_loss_intercept
converged = True
break
else:
iter += 1
if verbose and isinstance(pbar, tqdm):
pbar.set_postfix(dict(loss=loss.cpu().detach().numpy().item()))
self._set_fitted()
return {
'converged': converged,
'lower_bound': self._to_frontend_dtype(min_loss),
'n_iter': iter
}
def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
"""Format kwargs for `fit` method.
Parameters
----------
fit_kwargs
dictionary of Kwargs to format. See `fit` method for details.
Returns
-------
Formatted kwargs.
"""
return dict(
step_size_range=fit_kwargs.get('step_size_range', (1e-8, 1.0)),
n_iter_no_change=fit_kwargs.get('n_iter_no_change', 25),
tol=fit_kwargs.get('tol', 1e-6),
verbose=fit_kwargs.get('verbose', 0),
n_step_sizes=fit_kwargs.get('n_step_sizes', 16),
max_iter=fit_kwargs.get('max_iter', 1000)
)
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of scores with leading batch dimension.
Raises
------
NotFittedError
Raised if method called and detector has not been fit.
"""
if not torch.jit.is_scripting():
self.check_fitted()
x_nys = self.nystroem.transform(x)
coeffs = torch.nn.functional.normalize(self.coeffs, dim=-1)
preds = x_nys @ coeffs
return -preds
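# Illustrative sketch (not part of the original module). The fit loop above
# evaluates a grid of candidate step sizes at every iteration and keeps the one
# that minimises the one-class SVM objective. The grid construction is
# reproduced below: `n_step_sizes` points spaced equidistantly in log space
# between `min_eta` and `max_eta`, mirroring the `step_size_range` and
# `n_step_sizes` arguments of `fit`.
def _example_step_size_grid(min_eta: float = 1e-8, max_eta: float = 1.0, n_step_sizes: int = 16):
    import numpy as np
    import torch

    etas = torch.tensor(
        np.linspace(np.log(min_eta), np.log(max_eta), n_step_sizes)
    ).exp()
    return etas  # etas[0] ~= min_eta and etas[-1] ~= max_eta up to float error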
class _Nystroem:
def __init__(
self,
kernel: Callable,
n_components: Optional[int] = None
) -> None:
"""Nystroem Approximation of a kernel.
Parameters
----------
kernel
Kernel function.
n_components
Number of components in the Nystroem approximation. By default uses all of them.
"""
self.kernel = kernel
self.n_components = n_components
def fit(
self,
x: torch.Tensor
) -> Self:
"""Fit the Nystroem approximation.
Parameters
----------
x
`torch.Tensor` of shape ``(n, d)`` where ``n`` is the number of samples and ``d`` is the dimensionality of
the data.
"""
n = len(x)
n_components = n if self.n_components is None else self.n_components
inds = torch.randperm(n)[:n_components]
self.z = x[inds]
K_zz = self.kernel(self.z, self.z)
K_zz += 1e-16 + torch.eye(n_components, device=K_zz.device)
U, S, V = torch.linalg.svd(K_zz)
self.K_zz_root_inv = (U / S.sqrt()) @ V
return self
def transform(
self,
x: torch.Tensor
) -> torch.Tensor:
"""Transform `x` into the Nystroem approximation.
Parameters
----------
x
`torch.Tensor` of shape ``(n, d)`` where ``n`` is the number of samples and ``d`` is the dimensionality of
the data.
Returns
-------
`torch.Tensor` of shape ``(n, n_components)`` where ``n_components`` is the number of components in the
Nystroem approximation.
"""
K_xz = self.kernel(x, self.z)
return K_xz @ self.K_zz_root_inv
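# Illustrative usage sketch (not part of the original module). `_Nystroem` maps
# data into an explicit feature space built from a random subset of
# `n_components` reference points, so downstream models can work with plain
# feature vectors instead of the full kernel matrix. The RBF kernel below is an
# inline stand-in used only for this example.
def _example_nystroem_usage():
    import torch

    def rbf(x, y, sigma=1.0):
        return torch.exp(-torch.cdist(x, y) ** 2 / (2 * sigma ** 2))

    x = torch.randn(200, 3)
    nystroem = _Nystroem(kernel=rbf, n_components=50)
    features = nystroem.fit(x).transform(x)  # shape (200, 50)
    return features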
| 15,972 | 34.416851 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/base.py
|
from typing import List, Union, Optional, Dict
from typing_extensions import Literal
from dataclasses import dataclass, fields
from abc import ABC, abstractmethod
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import FitMixinTorch
from alibi_detect.utils.pytorch.misc import get_device
from alibi_detect.exceptions import ThresholdNotInferredError
@dataclass
class TorchOutlierDetectorOutput:
"""Output of the outlier detector."""
threshold_inferred: bool
instance_score: torch.Tensor
threshold: Optional[torch.Tensor]
is_outlier: Optional[torch.Tensor]
p_value: Optional[torch.Tensor]
def to_frontend_dtype(self):
result = {}
for f in fields(self):
value = getattr(self, f.name)
if isinstance(value, torch.Tensor):
result[f.name] = value.cpu().detach().numpy()
else:
result[f.name] = value
return result
def _tensor_to_frontend_dtype(x: Union[torch.Tensor, np.ndarray, float]) -> Union[np.ndarray, float]:
if isinstance(x, torch.Tensor):
x = x.cpu().detach().numpy()
if isinstance(x, np.ndarray) and x.ndim == 0:
x = x.item()
return x # type: ignore[return-value]
def _raise_type_error(x):
raise TypeError(f'x is type={type(x)} but must be one of TorchOutlierDetectorOutput or a torch Tensor')
def to_frontend_dtype(x: Union[torch.Tensor, TorchOutlierDetectorOutput]) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""Converts any `torch` tensors found in input to `numpy` arrays.
Takes a `torch` tensor or `TorchOutlierDetectorOutput` and converts any `torch` tensors found to `numpy` arrays
Parameters
----------
x
Data to convert.
Returns
-------
    `np.ndarray` or dictionary containing `numpy` arrays
"""
return {
'TorchOutlierDetectorOutput': lambda x: x.to_frontend_dtype(),
'Tensor': _tensor_to_frontend_dtype
}.get(
x.__class__.__name__,
_raise_type_error
)(x)
class TorchOutlierDetector(torch.nn.Module, FitMixinTorch, ABC):
"""Base class for torch backend outlier detection algorithms."""
threshold_inferred = False
threshold = None
def __init__(
self,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
self.device = get_device(device)
super().__init__()
@abstractmethod
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Score the data.
Parameters
----------
x
Data to score.
"""
pass
@torch.jit.unused
def check_threshold_inferred(self):
"""Check if threshold is inferred.
Raises
------
ThresholdNotInferredError
Raised if threshold is not inferred.
"""
if not self.threshold_inferred:
raise ThresholdNotInferredError(self.__class__.__name__)
@staticmethod
def _to_frontend_dtype(
arg: Union[torch.Tensor, TorchOutlierDetectorOutput]
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""Converts input to frontend data format.
This is an interface method that ensures that the output of the outlier detector is in a common format for
different backends. Mostly this means converting `torch.tensors` to `np.ndarray`. If `arg` is a
`TorchOutlierDetectorOutput` object, we call its `to_frontend_dtype` method. Otherwise, if `arg` is a
`torch.Tensor`, we convert it to a `numpy` array.
Parameters
----------
        arg
Data to convert.
Returns
-------
        `np.ndarray` or dictionary containing `numpy` arrays
"""
return to_frontend_dtype(arg)
def _to_backend_dtype(self, x: Union[List, np.ndarray]) -> torch.Tensor:
"""Converts data from the frontend to the backend format.
This is an interface method that ensures that the input of the chosen outlier detector backend is in the correct
format.
Parameters
----------
x
Data to convert.
"""
return torch.as_tensor(x, dtype=torch.float32, device=self.device)
def _ensembler(self, x: torch.Tensor) -> torch.Tensor:
"""Aggregates and normalizes the data
If the detector has an ensembler attribute we use it to aggregate and normalize the data.
Parameters
----------
x
Data to aggregate and normalize.
Returns
-------
`torch.Tensor` or original data without alteration
Raises
------
ThresholdNotInferredError
If the detector is an ensemble, and the ensembler used to aggregate the outlier scores has a fittable
component, then the detector threshold must be inferred before predictions can be made. This is because
while the scoring functionality of the detector is fit within the `.fit` method on the training data
the ensembler has to be fit on the validation data along with the threshold and this is done in the
`.infer_threshold` method.
"""
if hasattr(self, 'ensembler') and self.ensembler is not None:
# `type: ignore` here because self.ensembler here causes an error with mypy when using torch.jit.script.
# For some reason it thinks self.ensembler is a torch.Tensor and therefore is not callable.
if not torch.jit.is_scripting():
if not self.ensembler.fitted: # type: ignore
self.check_threshold_inferred()
return self.ensembler(x) # type: ignore
else:
return x
def _classify_outlier(self, scores: torch.Tensor) -> torch.Tensor:
"""Classify the data as outlier or not.
Parameters
----------
scores
Scores to classify. Larger scores indicate more likely outliers.
Returns
-------
`torch.Tensor` or ``None``
"""
return (scores > self.threshold).to(torch.int8) if self.threshold_inferred else None
def _p_vals(self, scores: torch.Tensor) -> torch.Tensor:
"""Compute p-values for the scores.
Parameters
----------
scores
Scores to compute p-values for.
Returns
-------
`torch.Tensor` or ``None``
"""
return (1 + (scores[:, None] < self.val_scores).sum(-1))/len(self.val_scores) \
if self.threshold_inferred else None
def infer_threshold(self, x: torch.Tensor, fpr: float):
"""Infer the threshold for the data. Prerequisite for outlier predictions.
Parameters
----------
x
Data to infer the threshold for.
fpr
False positive rate to use for threshold inference.
Raises
------
ValueError
Raised if `fpr` is not in ``(0, 1)``.
ValueError
Raised if `fpr` is less than ``1/len(x)``.
"""
if not 0 < fpr < 1:
raise ValueError('`fpr` must be in `(0, 1)`.')
if fpr < 1/len(x):
raise ValueError(f'`fpr` must be greater than `1/len(x)={1/len(x)}`.')
self.val_scores = self.score(x)
if self.ensemble:
self.val_scores = self.ensembler.fit(self.val_scores).transform(self.val_scores) # type: ignore
self.threshold = torch.quantile(self.val_scores, 1-fpr, interpolation='higher')
self.threshold_inferred = True
def predict(self, x: torch.Tensor) -> TorchOutlierDetectorOutput:
"""Predict outlier labels for the data.
Computes the outlier scores. If the detector is not fit on reference data we raise an error.
If the threshold is inferred, the outlier labels and p-values are also computed and returned.
Otherwise, the outlier labels and p-values are set to ``None``.
Parameters
----------
x
Data to predict.
Returns
-------
Output of the outlier detector. Includes the p-values, outlier labels, instance scores and threshold.
Raises
------
ValueError
Raised if the detector is not fit on reference data.
"""
self.check_fitted()
raw_scores = self.score(x)
scores = self._ensembler(raw_scores)
return TorchOutlierDetectorOutput(
instance_score=scores,
is_outlier=self._classify_outlier(scores),
p_value=self._p_vals(scores),
threshold_inferred=self.threshold_inferred,
threshold=self.threshold
)
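# Illustrative sketch (not part of the original module). The intended workflow
# for any concrete subclass is fit -> infer_threshold -> predict. The toy
# detector below scores instances by their L2 norm purely to demonstrate the
# contract; it is not one of the shipped detectors.
def _example_detector_workflow():
    import torch

    class _NormDetector(TorchOutlierDetector):
        ensemble = False

        def __init__(self):
            super().__init__(device='cpu')

        def fit(self, x_ref: torch.Tensor):
            self._set_fitted()
            return self

        def score(self, x: torch.Tensor) -> torch.Tensor:
            return x.norm(dim=-1)

    detector = _NormDetector()
    detector.fit(torch.randn(10, 2))
    detector.infer_threshold(torch.randn(100, 2), fpr=0.1)  # threshold at the 90th percentile
    output = detector.predict(3 * torch.randn(5, 2))
    return output.is_outlier, output.p_value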
| 8,752 | 32.408397 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/knn.py
|
from typing import Optional, Union, List, Tuple
from typing_extensions import Literal
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import Ensembler
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class KNNTorch(TorchOutlierDetector):
def __init__(
self,
k: Union[np.ndarray, List, Tuple, int],
kernel: Optional[torch.nn.Module] = None,
ensembler: Optional[Ensembler] = None,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""PyTorch backend for KNN detector.
Parameters
----------
k
Number of nearest neighbors to compute distance to. `k` can be a single value or
an array of integers. If `k` is a single value the outlier score is the distance/kernel
similarity to the `k`-th nearest neighbor. If `k` is a list then it returns the distance/kernel
similarity to each of the specified `k` neighbors.
kernel
If a kernel is specified then instead of using `torch.cdist` the kernel defines the `k` nearest
neighbor distance.
ensembler
If `k` is an array of integers then the ensembler must not be ``None``. Should be an instance
of :py:obj:`alibi_detect.od.pytorch.ensemble.ensembler`. Responsible for combining
multiple scores into a single score.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
"""
super().__init__(device=device)
self.kernel = kernel
self.ensemble = isinstance(k, (np.ndarray, list, tuple))
self.ks = torch.tensor(k) if self.ensemble else torch.tensor([k], device=self.device)
self.ensembler = ensembler
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
ThresholdNotInferredError
If called before detector has had `infer_threshold` method called.
"""
raw_scores = self.score(x)
scores = self._ensembler(raw_scores)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
The tensor of instances. First dimension corresponds to batch.
Returns
-------
Tensor of scores for each element in `x`.
Raises
------
NotFittedError
If called before detector has been fit.
"""
self.check_fitted()
K = -self.kernel(x, self.x_ref) if self.kernel is not None else torch.cdist(x, self.x_ref)
bot_k_dists = torch.topk(K, int(torch.max(self.ks)), dim=1, largest=False)
all_knn_dists = bot_k_dists.values[:, self.ks-1]
return all_knn_dists if self.ensemble else all_knn_dists[:, 0]
def fit(self, x_ref: torch.Tensor):
"""Fits the detector
Parameters
----------
x_ref
The Dataset tensor.
"""
self.x_ref = x_ref
self._set_fitted()
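# Illustrative usage sketch (not part of the original module). With a list of
# `k` values the detector runs in ensemble mode and an `Ensembler` combines the
# per-k scores; the normalizer/aggregator choice below is only one possible
# configuration.
def _example_knn_usage():
    import torch
    from alibi_detect.od.pytorch.ensemble import Ensembler, PValNormalizer, AverageAggregator

    knn = KNNTorch(
        k=[3, 5, 7],
        ensembler=Ensembler(normalizer=PValNormalizer(), aggregator=AverageAggregator()),
        device='cpu',
    )
    knn.fit(torch.randn(256, 4))
    knn.infer_threshold(torch.randn(128, 4), fpr=0.1)  # also fits the ensembler
    output = knn.predict(torch.randn(8, 4))
    return output.is_outlier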
| 3,635 | 34.647059 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/gmm.py
|
from typing import Optional, Union, Dict, Type
from typing_extensions import Literal
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.od.pytorch.base import TorchOutlierDetector
from alibi_detect.models.pytorch.gmm import GMMModel
from alibi_detect.utils.pytorch.misc import get_optimizer
class GMMTorch(TorchOutlierDetector):
ensemble = False
def __init__(
self,
n_components: int,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""Pytorch backend for the Gaussian Mixture Model (GMM) outlier detector.
Parameters
----------
n_components
Number of components in gaussian mixture model.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
Raises
------
ValueError
If `n_components` is less than 1.
"""
super().__init__(device=device)
if n_components < 1:
raise ValueError('n_components must be at least 1')
self.n_components = n_components
def fit( # type: ignore[override]
self,
x_ref: torch.Tensor,
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
learning_rate: float = 0.1,
max_epochs: int = 10,
batch_size: int = 32,
tol: float = 1e-3,
n_iter_no_change: int = 25,
verbose: int = 0,
) -> Dict:
"""Fit the GMM model.
Parameters
----------
x_ref
Training data.
optimizer
Optimizer used to train the model.
learning_rate
Learning rate used to train the model.
max_epochs
Maximum number of training epochs.
batch_size
Batch size used to train the model.
tol
Convergence threshold. Training iterations will stop when the lower bound average
gain is below this threshold.
n_iter_no_change
The number of iterations over which the loss must decrease by `tol` in order for
optimization to continue.
verbose
Verbosity level during training. 0 is silent, 1 a progress bar.
Returns
-------
Dictionary with fit results. The dictionary contains the following keys:
- converged: bool indicating whether training converged.
- n_epochs: number of gradient descent iterations performed.
- lower_bound: log-likelihood lower bound.
"""
self.model = GMMModel(self.n_components, x_ref.shape[-1]).to(self.device)
x_ref = x_ref.to(torch.float32)
batch_size = len(x_ref) if batch_size is None else batch_size
dataset = TorchDataset(x_ref)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=True
)
optimizer_instance: torch.optim.Optimizer = optimizer( # type: ignore[call-arg]
self.model.parameters(),
lr=learning_rate
)
self.model.train()
min_loss = None
converged = False
epoch = 0
while not converged and epoch < max_epochs:
epoch += 1
dl = tqdm(
enumerate(dataloader),
total=len(dataloader),
disable=not verbose
)
loss_ma = 0
for step, x in dl:
x = x.to(self.device)
nll = self.model(x).mean()
optimizer_instance.zero_grad()
nll.backward()
optimizer_instance.step()
if verbose and isinstance(dl, tqdm):
loss_ma = loss_ma + (nll.item() - loss_ma) / (step + 1)
dl.set_description(f'Epoch {epoch + 1}/{max_epochs}')
dl.set_postfix(dict(loss_ma=loss_ma))
if min_loss is None or nll < min_loss - tol:
t_since_improv = 0
min_loss = nll
else:
t_since_improv += 1
if t_since_improv > n_iter_no_change:
converged = True
break
self._set_fitted()
return {
'converged': converged,
'lower_bound': self._to_frontend_dtype(min_loss),
'n_epochs': epoch
}
def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
"""Format kwargs for `fit` method.
Parameters
----------
        fit_kwargs
dictionary of Kwargs to format. See `fit` method for details.
Returns
-------
Formatted kwargs.
"""
return dict(
optimizer=get_optimizer(fit_kwargs.get('optimizer')),
learning_rate=fit_kwargs.get('learning_rate', 0.1),
batch_size=fit_kwargs.get('batch_size', None),
max_epochs=(lambda v: 10 if v is None else v)(fit_kwargs.get('max_epochs', None)),
verbose=fit_kwargs.get('verbose', 0),
tol=fit_kwargs.get('tol', 1e-3),
n_iter_no_change=fit_kwargs.get('n_iter_no_change', 25)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
        ThresholdNotInferredError
If called before detector has had `infer_threshold` method called.
"""
scores = self.score(x)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of scores with leading batch dimension.
Raises
------
NotFittedError
Raised if method called and detector has not been fit.
"""
if not torch.jit.is_scripting():
self.check_fitted()
x = x.to(torch.float32)
preds = self.model(x.to(self.device))
return preds
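# Illustrative usage sketch (not part of the original module). Hyperparameters
# are kept small so the example runs quickly; they are not tuned values.
def _example_gmm_usage():
    import torch

    gmm = GMMTorch(n_components=2, device='cpu')
    fit_info = gmm.fit(torch.randn(256, 4), max_epochs=5, verbose=0)
    gmm.infer_threshold(torch.randn(128, 4), fpr=0.1)
    output = gmm.predict(torch.randn(8, 4))
    return fit_info, output.is_outlier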
| 6,651 | 31.291262 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/pca.py
|
from typing import Optional, Union, Callable
from typing_extensions import Literal
import torch
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class PCATorch(TorchOutlierDetector):
ensemble = False
def __init__(
self,
n_components: int,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""PyTorch backend for PCA detector.
Parameters
----------
n_components:
The number of dimensions in the principal subspace. For linear PCA should have
``1 <= n_components < dim(data)``. For kernel pca should have ``1 <= n_components < len(data)``.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
Raises
------
ValueError
If `n_components` is less than 1.
"""
super().__init__(device=device)
self.n_components = n_components
if n_components < 1:
raise ValueError('n_components must be at least 1')
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
        ThresholdNotInferredError
If called before detector has had `infer_threshold` method called.
"""
scores = self.score(x)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
The tensor of instances. First dimension corresponds to batch.
Returns
-------
Tensor of scores for each element in `x`.
Raises
------
        NotFittedError
If called before detector has been fit.
"""
self.check_fitted()
score = self._score(x)
return score
def fit(self, x_ref: torch.Tensor) -> None:
"""Fits the PCA detector.
Parameters
----------
x_ref
The Dataset tensor.
"""
self.pcs = self._fit(x_ref)
self._set_fitted()
def _fit(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
def _score(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
class LinearPCATorch(PCATorch):
def __init__(
self,
n_components: int,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""Linear variant of the PyTorch backend for PCA detector.
Parameters
----------
n_components:
The number of dimensions in the principal subspace.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
super().__init__(device=device, n_components=n_components)
def _fit(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the principal components of the reference data.
We compute the principal components of the reference data using the covariance matrix and then
remove the largest `n_components` eigenvectors. The remaining eigenvectors correspond to the
invariant dimensions of the data. Changes in these dimensions are used to compute the outlier
score which is the distance to the principal subspace spanned by the first `n_components`
eigenvectors.
Parameters
----------
x
The reference data.
Returns
-------
The principal components of the reference data.
Raises
------
ValueError
If `n_components` is greater than or equal to number of features
"""
if self.n_components >= x.shape[1]:
raise ValueError("n_components must be less than the number of features.")
self.x_ref_mean = x.mean(0)
x -= self.x_ref_mean
cov_mat = (x.t() @ x)/(len(x)-1)
_, V = torch.linalg.eigh(cov_mat)
return V[:, :-self.n_components]
def _score(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the outlier score.
Centers the data and projects it onto the principal components. The score is then the sum of the
squared projections.
Parameters
----------
x
The test data.
Returns
-------
The outlier score.
"""
x_cen = x - self.x_ref_mean
x_pcs = x_cen @ self.pcs
return (x_pcs**2).sum(1)
class KernelPCATorch(PCATorch):
def __init__(
self,
n_components: int,
kernel: Optional[Callable],
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""Kernel variant of the PyTorch backend for PCA detector.
Parameters
----------
n_components:
The number of dimensions in the principal subspace.
kernel
Kernel function to use for outlier detection.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
super().__init__(device=device, n_components=n_components)
self.kernel = kernel
def _fit(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the principal components of the reference data.
We compute the principal components of the reference data using the kernel matrix and then
return the largest `n_components` eigenvectors. These are then normalized to have length
equal to `1/eigenvalue`. Note that this differs from the linear case where we remove the
largest eigenvectors.
Parameters
----------
x
The reference data.
Returns
-------
The principal components of the reference data.
Raises
------
ValueError
If `n_components` is greater than or equal to the number of reference samples.
"""
if self.n_components >= x.shape[0]:
raise ValueError("n_components must be less than the number of reference instances.")
self.x_ref = x
K = self.compute_kernel_mat(x)
D, V = torch.linalg.eigh(K)
pcs = V / torch.sqrt(D)[None, :]
return pcs[:, -self.n_components:]
def _score(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the outlier score.
Centers the data and projects it onto the principal components. The score is then the sum of the
squared projections.
Parameters
----------
x
The test data.
Returns
-------
The outlier score.
"""
k_xr = self.kernel(x, self.x_ref)
k_xr_row_sums = k_xr.sum(1)
n, m = k_xr.shape
k_xr_cen = k_xr - self.k_col_sums[None, :]/m - k_xr_row_sums[:, None]/n + self.k_sum/(m*n)
x_pcs = k_xr_cen @ self.pcs
scores = -2 * k_xr.mean(-1) - (x_pcs**2).sum(1)
return scores
def compute_kernel_mat(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the centered kernel matrix.
Parameters
----------
x
The reference data.
Returns
-------
The centered kernel matrix.
"""
n = len(x)
k = self.kernel(x, x)
self.k_col_sums = k.sum(0)
k_row_sums = k.sum(1)
self.k_sum = k_row_sums.sum()
k_cen = k - self.k_col_sums[None, :]/n - k_row_sums[:, None]/n + self.k_sum/(n**2)
return k_cen
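# Illustrative usage sketch (not part of the original module). The linear
# variant scores by the energy falling outside the principal subspace; the
# kernel variant does the analogous computation in feature space. The RBF
# kernel is an inline stand-in used only for this example.
def _example_pca_usage():
    import torch

    x_ref = torch.randn(128, 6)
    x_test = torch.randn(8, 6)

    linear = LinearPCATorch(n_components=2, device='cpu')
    linear.fit(x_ref.clone())  # .clone() because _fit subtracts the mean in place
    linear_scores = linear.score(x_test)

    def rbf(x, y, sigma=1.0):
        return torch.exp(-torch.cdist(x, y) ** 2 / (2 * sigma ** 2))

    kernel_pca = KernelPCATorch(n_components=4, kernel=rbf, device='cpu')
    kernel_pca.fit(x_ref.clone())
    kernel_scores = kernel_pca.score(x_test)
    return linear_scores, kernel_scores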
| 8,313 | 30.255639 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
KNNTorch = import_optional('alibi_detect.od.pytorch.knn', ['KNNTorch'])
LOFTorch = import_optional('alibi_detect.od.pytorch.lof', ['LOFTorch'])
MahalanobisTorch = import_optional('alibi_detect.od.pytorch.mahalanobis', ['MahalanobisTorch'])
KernelPCATorch, LinearPCATorch = import_optional('alibi_detect.od.pytorch.pca', ['KernelPCATorch', 'LinearPCATorch'])
Ensembler = import_optional('alibi_detect.od.pytorch.ensemble', ['Ensembler'])
GMMTorch = import_optional('alibi_detect.od.pytorch.gmm', ['GMMTorch'])
BgdSVMTorch, SgdSVMTorch = import_optional('alibi_detect.od.pytorch.svm', ['BgdSVMTorch', 'SgdSVMTorch'])
| 691 | 68.2 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/ensemble.py
|
from abc import ABC, abstractmethod
from typing import Optional
from typing_extensions import Self
import torch
import numpy as np
from torch.nn import Module
from alibi_detect.exceptions import NotFittedError
class BaseTransformTorch(Module):
def __init__(self):
"""Base Transform class.
        Provides abstract methods for transform objects that map `torch` tensors.
"""
super().__init__()
def transform(self, x: torch.Tensor):
"""Public transform method.
Parameters
----------
x
`torch.Tensor` array to be transformed
"""
raise NotImplementedError()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.transform(x)
class FitMixinTorch(ABC):
fitted = False
@abstractmethod
def fit(self, x_ref: torch.Tensor) -> Self:
"""Abstract fit method.
Parameters
----------
x
`torch.Tensor` to fit object on.
"""
pass
def _set_fitted(self) -> Self:
"""Sets the fitted attribute to True.
Should be called within each transform method.
"""
self.fitted = True
return self
def check_fitted(self):
"""Checks to make sure object has been fitted.
Raises
------
NotFittedError
Raised if method called and object has not been fit.
"""
if not torch.jit.is_scripting():
self._check_fitted()
@torch.jit.unused
def _check_fitted(self):
"""Raises error if parent object instance has not been fit."""
if not self.fitted:
raise NotFittedError(self.__class__.__name__)
class PValNormalizer(BaseTransformTorch, FitMixinTorch):
def __init__(self):
"""Maps scores to there p-values.
Needs to be fit (see :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseFittedTransformTorch`).
Returns the proportion of scores in the reference dataset that are greater than the score of
interest. Output is between ``1`` and ``0``. Small values are likely to be outliers.
"""
super().__init__()
self.val_scores = None
def fit(self, val_scores: torch.Tensor) -> Self:
"""Fit transform on scores.
Parameters
----------
val_scores
score outputs of ensemble of detectors applied to reference data.
"""
self.val_scores = val_scores
return self._set_fitted()
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Transform scores to 1 - p-values.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of 1 - p-values.
"""
self.check_fitted()
less_than_val_scores = scores[:, None, :] < self.val_scores[None, :, :]
p_vals = (1 + less_than_val_scores.sum(1))/(len(self.val_scores) + 1)
return 1 - p_vals
class ShiftAndScaleNormalizer(BaseTransformTorch, FitMixinTorch):
def __init__(self):
"""Maps scores to their normalized values.
Needs to be fit (see :py:obj:`~alibi_detect.od.pytorch.ensemble.BaseFittedTransformTorch`).
Subtracts the dataset mean and scales by the standard deviation.
"""
super().__init__()
self.val_means = None
self.val_scales = None
def fit(self, val_scores: torch.Tensor) -> Self:
"""Computes the mean and standard deviation of the scores and stores them.
Parameters
----------
val_scores
`Torch.Tensor` of scores from ensemble of detectors.
"""
self.val_means = val_scores.mean(0)[None, :]
self.val_scales = val_scores.std(0)[None, :]
return self._set_fitted()
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Transform scores to normalized values. Subtracts the mean and scales by the standard deviation.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of normalized scores.
"""
self.check_fitted()
return (scores - self.val_means)/self.val_scales
class TopKAggregator(BaseTransformTorch):
def __init__(self, k: Optional[int] = None):
"""Takes the mean of the top `k` scores.
Parameters
----------
k
number of scores to take the mean of. If `k` is left ``None`` then will be set to
half the number of scores passed in the forward call.
"""
super().__init__()
self.k = k
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Takes the mean of the top `k` scores.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of mean of top `k` scores.
"""
if self.k is None:
self.k = int(np.ceil(scores.shape[1]/2))
sorted_scores, _ = torch.sort(scores, 1)
return sorted_scores[:, -self.k:].mean(-1)
class AverageAggregator(BaseTransformTorch):
def __init__(self, weights: Optional[torch.Tensor] = None):
"""Averages the scores of the detectors in an ensemble.
Parameters
----------
weights
Optional parameter to weight the scores. If `weights` is left ``None`` then will be set to
a vector of ones.
Raises
------
ValueError
If `weights` does not sum to ``1``.
"""
super().__init__()
if weights is not None and not np.isclose(weights.sum(), 1):
raise ValueError("Weights must sum to 1.")
self.weights = weights
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Averages the scores of the detectors in an ensemble. If weights were passed in the `__init__`
then these are used to weight the scores.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of mean of scores.
"""
if self.weights is None:
m = scores.shape[-1]
self.weights = torch.ones(m, device=scores.device)/m
return scores @ self.weights
class MaxAggregator(BaseTransformTorch):
def __init__(self):
"""Takes the maximum of the scores of the detectors in an ensemble."""
super().__init__()
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Takes the maximum score of a set of detectors in an ensemble.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of maximum scores.
"""
vals, _ = torch.max(scores, dim=-1)
return vals
class MinAggregator(BaseTransformTorch):
def __init__(self):
"""Takes the minimum score of a set of detectors in an ensemble."""
super().__init__()
def transform(self, scores: torch.Tensor) -> torch.Tensor:
"""Takes the minimum score of a set of detectors in an ensemble.
Parameters
----------
scores
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of minimum scores.
"""
vals, _ = torch.min(scores, dim=-1)
return vals
class Ensembler(BaseTransformTorch, FitMixinTorch):
def __init__(self,
normalizer: Optional[BaseTransformTorch] = None,
                 aggregator: Optional[BaseTransformTorch] = None):
"""An Ensembler applies normalization and aggregation operations to the scores of an ensemble of detectors.
Parameters
----------
normalizer
`BaseFittedTransformTorch` object to normalize the scores. If ``None`` then no normalization
is applied.
aggregator
`BaseTransformTorch` object to aggregate the scores. If ``None`` defaults to `AverageAggregator`.
"""
super().__init__()
self.normalizer = normalizer
if self.normalizer is None:
self.fitted = True
if aggregator is None:
aggregator = AverageAggregator()
self.aggregator = aggregator
def transform(self, x: torch.Tensor) -> torch.Tensor:
"""Apply the normalizer and aggregator to the scores.
Parameters
----------
x
`Torch.Tensor` of scores from ensemble of detectors.
Returns
-------
`Torch.Tensor` of aggregated and normalized scores.
"""
if self.normalizer is not None:
x = self.normalizer(x)
x = self.aggregator(x)
return x
def fit(self, x: torch.Tensor) -> Self:
"""Fit the normalizer to the scores.
Parameters
----------
x
`Torch.Tensor` of scores from ensemble of detectors.
"""
if self.normalizer is not None:
self.normalizer.fit(x) # type: ignore
return self._set_fitted()
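# Illustrative usage sketch (not part of the original module). Scores from an
# ensemble of detectors (one column per detector) are normalized to
# 1 - p-values and then aggregated to a single score per instance; the shapes
# below are arbitrary toy values.
def _example_ensembler_usage():
    import torch

    val_scores = torch.randn(100, 3).abs()   # reference scores from 3 detectors
    test_scores = torch.randn(8, 3).abs()

    ensembler = Ensembler(normalizer=PValNormalizer(), aggregator=TopKAggregator(k=2))
    ensembler.fit(val_scores)                # fits the normalizer only
    combined = ensembler.transform(test_scores)  # shape (8,)
    return combined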
| 9,337 | 28.644444 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/mahalanobis.py
|
from typing import Optional, Union
from typing_extensions import Literal
import torch
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class MahalanobisTorch(TorchOutlierDetector):
ensemble = False
def __init__(
self,
min_eigenvalue: float = 1e-6,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""PyTorch backend for Mahalanobis detector.
Parameters
----------
min_eigenvalue
Eigenvectors with eigenvalues below this value will be discarded.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed. Can be specified by
passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of ``torch.device``.
"""
super().__init__(device=device)
self.min_eigenvalue = min_eigenvalue
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
        ThresholdNotInferredError
If called before detector has had `infer_threshold` method called.
"""
scores = self.score(x)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
The tensor of instances. First dimension corresponds to batch.
Returns
-------
Tensor of scores for each element in `x`.
Raises
------
        NotFittedError
If called before detector has been fit.
"""
self.check_fitted()
x_pcs = self._compute_linear_proj(x)
return (x_pcs**2).sum(-1)
def fit(self, x_ref: torch.Tensor):
"""Fits the detector
Parameters
----------
x_ref
The Dataset tensor.
"""
self.x_ref = x_ref
self._compute_linear_pcs(self.x_ref)
self._set_fitted()
def _compute_linear_pcs(self, x: torch.Tensor):
"""Computes the principal components of the data.
Parameters
----------
x
The reference dataset.
"""
self.means = x.mean(0)
x = x - self.means
cov_mat = (x.t() @ x)/(len(x)-1)
D, V = torch.linalg.eigh(cov_mat)
non_zero_inds = D > self.min_eigenvalue
self.pcs = V[:, non_zero_inds] / D[None, non_zero_inds].sqrt()
def _compute_linear_proj(self, x: torch.Tensor) -> torch.Tensor:
"""Projects the data point being tested onto the principal components.
Parameters
----------
x
The data point being tested.
"""
x_cen = x - self.means
x_proj = x_cen @ self.pcs
return x_proj
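# Illustrative usage sketch (not part of the original module). After projecting
# onto eigenvectors scaled by 1/sqrt(eigenvalue), a squared Euclidean norm is
# equivalent to the squared Mahalanobis distance (x - mu)^T Sigma^{-1} (x - mu),
# up to the directions discarded via `min_eigenvalue`.
def _example_mahalanobis_usage():
    import torch

    detector = MahalanobisTorch(device='cpu')
    detector.fit(torch.randn(256, 4))
    detector.infer_threshold(torch.randn(128, 4), fpr=0.1)
    output = detector.predict(torch.randn(8, 4))
    return output.instance_score, output.is_outlier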
| 3,162 | 27.495495 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/od/pytorch/lof.py
|
from typing import Optional, Union, List, Tuple
from typing_extensions import Literal
import numpy as np
import torch
from alibi_detect.od.pytorch.ensemble import Ensembler
from alibi_detect.od.pytorch.base import TorchOutlierDetector
class LOFTorch(TorchOutlierDetector):
def __init__(
self,
k: Union[np.ndarray, List, Tuple, int],
kernel: Optional[torch.nn.Module] = None,
ensembler: Optional[Ensembler] = None,
device: Optional[Union[Literal['cuda', 'gpu', 'cpu'], 'torch.device']] = None,
):
"""PyTorch backend for LOF detector.
Parameters
----------
k
Number of nearest neighbors used to compute the local outlier factor. `k` can be a single
value or an array of integers. If `k` is a single value the score method uses the
distance/kernel similarity to the `k`-th nearest neighbor. If `k` is a list then it uses
the distance/kernel similarity to each of the specified `k` neighbors.
kernel
If a kernel is specified then instead of using `torch.cdist` the kernel defines the `k` nearest
neighbor distance.
ensembler
If `k` is an array of integers then the ensembler must not be ``None``. Should be an instance
of :py:obj:`alibi_detect.od.pytorch.ensemble.ensembler`. Responsible for combining
multiple scores into a single score.
device
Device type used. The default tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either ``'cuda'``, ``'gpu'``, ``'cpu'`` or an instance of
``torch.device``.
"""
TorchOutlierDetector.__init__(self, device=device)
self.kernel = kernel
self.ensemble = isinstance(k, (np.ndarray, list, tuple))
self.ks = torch.tensor(k) if self.ensemble else torch.tensor([k], device=self.device)
self.ensembler = ensembler
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Detect if `x` is an outlier.
Parameters
----------
x
`torch.Tensor` with leading batch dimension.
Returns
-------
`torch.Tensor` of ``bool`` values with leading batch dimension.
Raises
------
ThresholdNotInferredError
If called before detector has had `infer_threshold` method called.
"""
raw_scores = self.score(x)
scores = self._ensembler(raw_scores)
if not torch.jit.is_scripting():
self.check_threshold_inferred()
preds = scores > self.threshold
return preds
def _make_mask(self, reachabilities: torch.Tensor):
"""Generate a mask for computing the average reachability.
If k is an array then we need to compute the average reachability for each k separately. To do
this we use a mask to weight the reachability of each k-close neighbor by 1/k and the rest to 0.
"""
mask = torch.zeros_like(reachabilities[0], device=self.device)
for i, k in enumerate(self.ks):
mask[:k, i] = torch.ones(k, device=self.device)/k
return mask
def _compute_K(self, x, y):
"""Compute the distance matrix matrix between `x` and `y`."""
return torch.exp(-self.kernel(x, y)) if self.kernel is not None else torch.cdist(x, y)
def score(self, x: torch.Tensor) -> torch.Tensor:
"""Computes the score of `x`
Parameters
----------
x
The tensor of instances. First dimension corresponds to batch.
Returns
-------
Tensor of scores for each element in `x`.
Raises
------
NotFittedError
If called before detector has been fit.
"""
self.check_fitted()
# compute the distance matrix between x and x_ref
K = self._compute_K(x, self.x_ref)
# compute k nearest neighbors for maximum k in self.ks
max_k = torch.max(self.ks)
bot_k_items = torch.topk(K, int(max_k), dim=1, largest=False)
bot_k_inds, bot_k_dists = bot_k_items.indices, bot_k_items.values
        # To compute the reachabilities we get the k-distances of each object in the instance's
# k nearest neighbors. Then we take the maximum of their k-distances and the distance
# to the instance.
lower_bounds = self.knn_dists_ref[bot_k_inds]
reachabilities = torch.max(bot_k_dists[:, :, None], lower_bounds)
# Compute the average reachability for each instance. We use a mask to manage each k in
# self.ks separately.
mask = self._make_mask(reachabilities)
avg_reachabilities = (reachabilities*mask[None, :, :]).sum(1)
# Compute the LOF score for each instance. Note we don't take 1/avg_reachabilities as
# avg_reachabilities is the denominator in the LOF formula.
factors = (self.ref_inv_avg_reachabilities[bot_k_inds] * mask[None, :, :]).sum(1)
lofs = (avg_reachabilities * factors)
return lofs if self.ensemble else lofs[:, 0]
def fit(self, x_ref: torch.Tensor):
"""Fits the detector
Parameters
----------
x_ref
The Dataset tensor.
"""
# compute the distance matrix
K = self._compute_K(x_ref, x_ref)
# set diagonal to max distance to prevent torch.topk from returning the instance itself
K += torch.eye(len(K), device=self.device) * torch.max(K)
# compute k nearest neighbors for maximum k in self.ks
max_k = torch.max(self.ks)
bot_k_items = torch.topk(K, int(max_k), dim=1, largest=False)
bot_k_inds, bot_k_dists = bot_k_items.indices, bot_k_items.values
# store the k-distances for each instance for each k.
self.knn_dists_ref = bot_k_dists[:, self.ks-1]
        # To compute the reachabilities we get the k-distances of each object in the instance's
# k nearest neighbors. Then we take the maximum of their k-distances and the distance
# to the instance.
lower_bounds = self.knn_dists_ref[bot_k_inds]
reachabilities = torch.max(bot_k_dists[:, :, None], lower_bounds)
# Compute the average reachability for each instance. We use a mask to manage each k in
# self.ks separately.
mask = self._make_mask(reachabilities)
avg_reachabilities = (reachabilities*mask[None, :, :]).sum(1)
# Compute the inverse average reachability for each instance.
self.ref_inv_avg_reachabilities = 1/avg_reachabilities
self.x_ref = x_ref
self._set_fitted()
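# Illustrative usage sketch (not part of the original module). Single-k usage
# of the LOF backend: scores close to 1 indicate a local density similar to the
# neighbourhood, while clearly larger values indicate outliers.
def _example_lof_usage():
    import torch

    lof = LOFTorch(k=5, device='cpu')
    lof.fit(torch.randn(256, 4))
    lof.infer_threshold(torch.randn(128, 4), fpr=0.1)
    output = lof.predict(torch.randn(8, 4))
    return output.is_outlier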
| 6,709 | 39.666667 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/pytorch/embedding.py
|
from functools import partial
import torch
import torch.nn as nn
from transformers import AutoModel, AutoConfig
from typing import Dict, List
def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int],
use_cls: bool, reduce_mean: bool = True) -> torch.Tensor:
"""
Extract embeddings from hidden attention state layers.
Parameters
----------
hidden_states
Attention hidden states in the transformer model.
layers
List of layers to use for the embedding.
use_cls
Whether to use the next sentence token (CLS) to extract the embeddings.
reduce_mean
Whether to take the mean of the output tensor.
Returns
-------
Tensor with embeddings.
"""
hs = [hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer] for layer in layers]
hs = torch.cat(hs, dim=1) # type: ignore
y = hs.mean(dim=1) if reduce_mean else hs # type: ignore
return y
class TransformerEmbedding(nn.Module):
def __init__(self, model_name_or_path: str, embedding_type: str, layers: List[int] = None) -> None:
super().__init__()
self.config = AutoConfig.from_pretrained(model_name_or_path, output_hidden_states=True)
self.model = AutoModel.from_pretrained(model_name_or_path, config=self.config)
self.emb_type = embedding_type
self.hs_emb = partial(hidden_state_embedding, layers=layers, use_cls=embedding_type.endswith('cls'))
def forward(self, tokens: Dict[str, torch.Tensor]) -> torch.Tensor:
output = self.model(**tokens)
if self.emb_type == 'pooler_output':
return output.pooler_output
elif self.emb_type == 'last_hidden_state':
return output.last_hidden_state.mean(dim=1)
attention_hidden_states = output.hidden_states[1:]
if self.emb_type.startswith('hidden_state'):
return self.hs_emb(attention_hidden_states)
else:
raise ValueError('embedding_type needs to be one of pooler_output, '
'last_hidden_state, hidden_state, or hidden_state_cls.')
| 2,138 | 38.611111 | 108 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/pytorch/gmm.py
|
from torch import nn
import torch
class GMMModel(nn.Module):
def __init__(self, n_components: int, dim: int) -> None:
"""Gaussian Mixture Model (GMM).
Parameters
----------
n_components
The number of mixture components.
dim
The dimensionality of the data.
"""
super().__init__()
self.weight_logits = nn.Parameter(torch.zeros(n_components))
self.means = nn.Parameter(torch.randn(n_components, dim))
self.inv_cov_factor = nn.Parameter(torch.randn(n_components, dim, dim)/10)
@property
def _inv_cov(self) -> torch.Tensor:
return torch.bmm(self.inv_cov_factor, self.inv_cov_factor.transpose(1, 2))
@property
def _weights(self) -> torch.Tensor:
return nn.functional.softmax(self.weight_logits, dim=0)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the log-likelihood of the data.
Parameters
----------
x
Data to score.
"""
det = torch.linalg.det(self._inv_cov) # Note det(A^-1)=1/det(A)
to_means = x[:, None, :] - self.means[None, :, :]
likelihood = ((-0.5 * (
torch.einsum('bke,bke->bk', (torch.einsum('bkd,kde->bke', to_means, self._inv_cov), to_means))
)).exp()*det[None, :]*self._weights[None, :]).sum(-1)
return -likelihood.log()
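# Illustrative sketch (not part of the original module). The model is trained
# by minimising the mean negative log-likelihood it returns; a single gradient
# step on toy data is shown below.
def _example_gmm_model_step():
    import torch

    model = GMMModel(n_components=3, dim=2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)

    x = torch.randn(32, 2)
    nll = model(x).mean()  # scalar negative log-likelihood
    optimizer.zero_grad()
    nll.backward()
    optimizer.step()
    return nll.item()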
| 1,402 | 31.627907 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
TransformerEmbedding = import_optional(
'alibi_detect.models.pytorch.embedding',
names=['TransformerEmbedding'])
trainer = import_optional(
'alibi_detect.models.pytorch.trainer',
names=['trainer'])
__all__ = [
"TransformerEmbedding",
"trainer"
]
| 349 | 20.875 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/pytorch/trainer.py
|
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Callable, Union
def trainer(
model: Union[nn.Module, nn.Sequential],
loss_fn: Callable,
dataloader: DataLoader,
device: torch.device,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
preprocess_fn: Callable = None,
epochs: int = 20,
reg_loss_fn: Callable = (lambda model: 0),
verbose: int = 1,
) -> None:
"""
Train PyTorch model.
Parameters
----------
model
Model to train.
loss_fn
Loss function used for training.
dataloader
PyTorch dataloader.
device
Device used for training.
optimizer
Optimizer used for training.
learning_rate
Optimizer's learning rate.
preprocess_fn
Preprocessing function applied to each training batch.
epochs
Number of training epochs.
reg_loss_fn
The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
verbose
Whether to print training progress.
"""
optimizer = optimizer(model.parameters(), lr=learning_rate)
model.train()
for epoch in range(epochs):
dl = tqdm(enumerate(dataloader), total=len(dataloader)) if verbose == 1 else enumerate(dataloader)
loss_ma = 0
for step, (x, y) in dl:
if isinstance(preprocess_fn, Callable): # type: ignore
x = preprocess_fn(x)
x, y = x.to(device), y.to(device)
y_hat = model(x)
optimizer.zero_grad() # type: ignore
loss = loss_fn(y_hat, y) + reg_loss_fn(model)
loss.backward()
optimizer.step() # type: ignore
if verbose == 1:
loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1)
dl.set_description(f'Epoch {epoch + 1}/{epochs}')
dl.set_postfix(dict(loss_ma=loss_ma))
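# Illustrative usage sketch (not part of the original module). Model, data and
# hyperparameters are toy values chosen only so that the snippet runs quickly.
def _example_trainer_usage():
    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    x = torch.randn(64, 10)
    y = torch.randint(0, 2, (64,))
    dataloader = DataLoader(TensorDataset(x, y), batch_size=16, shuffle=True)

    model = nn.Sequential(nn.Linear(10, 16), nn.ReLU(), nn.Linear(16, 2))
    trainer(
        model,
        loss_fn=nn.CrossEntropyLoss(),
        dataloader=dataloader,
        device=torch.device('cpu'),
        epochs=2,
        verbose=0,
    )
    return model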
| 2,027 | 30.6875 | 106 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/pytorch/tests/test_trainer_pt.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/embedding.py
|
from functools import partial
import tensorflow as tf
from transformers import TFAutoModel, AutoConfig
from typing import Dict, List
def hidden_state_embedding(hidden_states: tf.Tensor, layers: List[int],
use_cls: bool, reduce_mean: bool = True) -> tf.Tensor:
"""
Extract embeddings from hidden attention state layers.
Parameters
----------
hidden_states
Attention hidden states in the transformer model.
layers
List of layers to use for the embedding.
use_cls
Whether to use the next sentence token (CLS) to extract the embeddings.
reduce_mean
Whether to take the mean of the output tensor.
Returns
-------
Tensor with embeddings.
"""
hs = [hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer] for layer in layers]
hs = tf.concat(hs, axis=1)
y = tf.reduce_mean(hs, axis=1) if reduce_mean else hs
return y
class TransformerEmbedding(tf.keras.Model):
def __init__(
self,
model_name_or_path: str,
embedding_type: str,
layers: List[int] = None
) -> None:
"""
Extract text embeddings from transformer models.
Parameters
----------
model_name_or_path
Name of or path to the model.
embedding_type
Type of embedding to extract. Needs to be one of pooler_output,
last_hidden_state, hidden_state or hidden_state_cls.
From the HuggingFace documentation:
- pooler_output
Last layer hidden-state of the first token of the sequence
(classification token) further processed by a Linear layer and a Tanh
activation function. The Linear layer weights are trained from the next
sentence prediction (classification) objective during pre-training.
This output is usually not a good summary of the semantic content of the
input, you’re often better with averaging or pooling the sequence of
hidden-states for the whole input sequence.
- last_hidden_state
Sequence of hidden-states at the output of the last layer of the model.
- hidden_state
Hidden states of the model at the output of each layer.
- hidden_state_cls
See hidden_state but use the CLS token output.
layers
If "hidden_state" or "hidden_state_cls" is used as embedding
type, layers has to be a list with int's referring to the hidden layers used
to extract the embedding.
"""
super(TransformerEmbedding, self).__init__()
self.config = AutoConfig.from_pretrained(model_name_or_path, output_hidden_states=True)
self.model = TFAutoModel.from_pretrained(model_name_or_path, config=self.config)
self.emb_type = embedding_type
self.hs_emb = partial(hidden_state_embedding, layers=layers, use_cls=embedding_type.endswith('cls'))
def call(self, tokens: Dict[str, tf.Tensor]) -> tf.Tensor:
output = self.model(tokens)
if self.emb_type == 'pooler_output':
return output.pooler_output
elif self.emb_type == 'last_hidden_state':
return tf.reduce_mean(output.last_hidden_state, axis=1)
attention_hidden_states = output.hidden_states[1:]
if self.emb_type.startswith('hidden_state'):
return self.hs_emb(attention_hidden_states)
else:
raise ValueError('embedding_type needs to be one of pooler_output, '
'last_hidden_state, hidden_state, or hidden_state_cls.')
| 3,718 | 40.322222 | 108 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/pixelcnn.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import logistic
from tensorflow_probability.python.distributions import mixture_same_family
from tensorflow_probability.python.distributions import quantized_distribution
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'Shift',
]
class WeightNorm(tf.keras.layers.Wrapper):
def __init__(self, layer, data_init: bool = True, **kwargs):
"""Layer wrapper to decouple magnitude and direction of the layer's weights.
This wrapper reparameterizes a layer by decoupling the weight's
magnitude and direction. This speeds up convergence by improving the
conditioning of the optimization problem. It has an optional data-dependent
initialization scheme, in which initial values of weights are set as functions
of the first minibatch of data. Both the weight normalization and data-
dependent initialization are described in [Salimans and Kingma (2016)][1].
Parameters
----------
layer
A `tf.keras.layers.Layer` instance. Supported layer types are
`Dense`, `Conv2D`, and `Conv2DTranspose`. Layers with multiple inputs
are not supported.
data_init
If `True` use data dependent variable initialization.
**kwargs
Additional keyword args passed to `tf.keras.layers.Wrapper`.
Raises
------
ValueError
If `layer` is not a `tf.keras.layers.Layer` instance.
"""
if not isinstance(layer, tf.keras.layers.Layer):
raise ValueError(
'Please initialize `WeightNorm` layer with a `tf.keras.layers.Layer` '
'instance. You passed: {input}'.format(input=layer)
)
layer_type = type(layer).__name__
if layer_type not in ['Dense', 'Conv2D', 'Conv2DTranspose']:
warnings.warn('`WeightNorm` is tested only for `Dense`, `Conv2D`, and '
'`Conv2DTranspose` layers. You passed a layer of type `{}`'
.format(layer_type))
super(WeightNorm, self).__init__(layer, **kwargs)
self.data_init = data_init
self._track_trackable(layer, name='layer')
self.filter_axis = -2 if layer_type == 'Conv2DTranspose' else -1
def _compute_weights(self):
"""Generate weights with normalization."""
# Determine the axis along which to expand `g` so that `g` broadcasts to
# the shape of `v`.
new_axis = -self.filter_axis - 3
self.layer.kernel = tf.nn.l2_normalize(self.v, axis=self.kernel_norm_axes) * tf.expand_dims(self.g, new_axis)
def _init_norm(self):
"""Set the norm of the weight vector."""
kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes))
self.g.assign(kernel_norm)
def _data_dep_init(self, inputs):
"""Data dependent initialization."""
# Normalize kernel first so that calling the layer calculates
# `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
self._compute_weights()
activation = self.layer.activation
self.layer.activation = None
use_bias = self.layer.bias is not None
if use_bias:
bias = self.layer.bias
self.layer.bias = tf.zeros_like(bias)
# Since the bias is initialized as zero, setting the activation to zero and
# calling the initialized layer (with normalized kernel) yields the correct
# computation ((5) in Salimans and Kingma (2016))
x_init = self.layer(inputs)
norm_axes_out = list(range(x_init.shape.rank - 1))
m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
self.g.assign(self.g * scale_init)
if use_bias:
self.layer.bias = bias
self.layer.bias.assign(-m_init * scale_init)
self.layer.activation = activation
def build(self, input_shape=None):
"""Build `Layer`.
Parameters
----------
input_shape
The shape of the input to `self.layer`.
Raises
------
ValueError
If `Layer` does not contain a `kernel` of weights.
"""
input_shape = tf.TensorShape(input_shape).as_list()
input_shape[0] = None
self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
if not hasattr(self.layer, 'kernel'):
raise ValueError('`WeightNorm` must wrap a layer that contains a `kernel` for weights')
self.kernel_norm_axes = list(range(self.layer.kernel.shape.ndims))
self.kernel_norm_axes.pop(self.filter_axis)
self.v = self.layer.kernel
# to avoid a duplicate `kernel` variable after `build` is called
self.layer.kernel = None
self.g = self.add_weight(
name='g',
shape=(int(self.v.shape[self.filter_axis]),),
initializer='ones',
dtype=self.v.dtype,
trainable=True
)
self.initialized = self.add_weight(
name='initialized',
dtype=tf.bool,
trainable=False
)
self.initialized.assign(False)
super(WeightNorm, self).build()
@tf.function
def call(self, inputs):
"""Call `Layer`."""
if not self.initialized:
if self.data_init:
self._data_dep_init(inputs)
else: # initialize `g` as the norm of the initialized kernel
self._init_norm()
self.initialized.assign(True)
self._compute_weights()
output = self.layer(inputs)
return output
def compute_output_shape(self, input_shape):
return tf.TensorShape(self.layer.compute_output_shape(input_shape).as_list())
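# --- Editor's usage sketch (not part of the original file) -------------------
# Wrap a Keras `Dense` layer in `WeightNorm`. The layer sizes and the random
# input below are illustrative assumptions, not values taken from this module.
def _weight_norm_usage_example():
    dense = tf.keras.layers.Dense(16, activation='relu')
    wn_dense = WeightNorm(dense, data_init=True)
    x = tf.random.normal([8, 32])
    y = wn_dense(x)  # the first call triggers the data-dependent initialization
    return y.shape   # TensorShape([8, 16])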
class Shift(bijector.Bijector):
def __init__(self,
shift,
validate_args=False,
name='shift'):
"""Instantiates the `Shift` bijector which computes `Y = g(X; shift) = X + shift`
where `shift` is a numeric `Tensor`.
Parameters
----------
shift
Floating-point `Tensor`.
validate_args
Python `bool` indicating whether arguments should be checked for correctness.
name
Python `str` name given to ops managed by this object.
"""
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([shift], dtype_hint=tf.float32)
self._shift = tensor_util.convert_nonref_to_tensor(shift, dtype=dtype, name='shift')
super(Shift, self).__init__(
forward_min_event_ndims=0,
is_constant_jacobian=True,
dtype=dtype,
validate_args=validate_args,
name=name
)
@property
def shift(self):
"""The `shift` `Tensor` in `Y = X + shift`."""
return self._shift
@classmethod
def _is_increasing(cls):
return True
def _forward(self, x):
return x + self.shift
def _inverse(self, y):
return y - self.shift
def _forward_log_det_jacobian(self, x):
# is_constant_jacobian = True for this bijector, hence the
# `log_det_jacobian` need only be specified for a single input, as this will
# be tiled to match `event_ndims`.
return tf.zeros([], dtype=dtype_util.base_dtype(x.dtype))
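# --- Editor's usage sketch (not part of the original file) -------------------
# `Shift` translates its input: forward adds the shift, inverse subtracts it,
# and the log-det-Jacobian is identically zero. The values are illustrative.
def _shift_usage_example():
    b = Shift(shift=2.5)
    x = tf.constant([0.0, 1.0, -1.0])
    y = b.forward(x)                                    # [2.5, 3.5, 1.5]
    x_back = b.inverse(y)                               # recovers x
    ldj = b.forward_log_det_jacobian(x, event_ndims=0)  # zeros
    return y, x_back, ldj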
class PixelCNN(distribution.Distribution):
def __init__(self,
image_shape: tuple,
conditional_shape: tuple = None,
num_resnet: int = 5,
num_hierarchies: int = 3,
num_filters: int = 160,
num_logistic_mix: int = 10,
receptive_field_dims: tuple = (3, 3),
dropout_p: float = 0.5,
resnet_activation: str = 'concat_elu',
l2_weight: float = 0.,
use_weight_norm: bool = True,
use_data_init: bool = True,
high: int = 255,
low: int = 0,
dtype=tf.float32,
name: str = 'PixelCNN') -> None:
"""
Construct Pixel CNN++ distribution.
Parameters
----------
image_shape
3D `TensorShape` or tuple for the `[height, width, channels]` dimensions of the image.
conditional_shape
`TensorShape` or tuple for the shape of the conditional input, or `None` if there is no conditional input.
num_resnet
The number of layers (shown in Figure 2 of [2]) within each highest-level block of Figure 2 of [1].
num_hierarchies
The number of highest-level blocks (separated by expansions/contractions of dimensions in Figure 2 of [1].)
num_filters
The number of convolutional filters.
num_logistic_mix
Number of components in the logistic mixture distribution.
receptive_field_dims
Height and width in pixels of the receptive field of the convolutional layers above and to the left
of a given pixel. The width (second element of the tuple) should be odd. Figure 1 (middle) of [2]
shows a receptive field of (3, 5) (the row containing the current pixel is included in the height).
The default of (3, 3) was used to produce the results in [1].
dropout_p
The dropout probability. Should be between 0 and 1.
resnet_activation
The type of activation to use in the resnet blocks. May be 'concat_elu', 'elu', or 'relu'.
l2_weight
The L2 regularization weight.
use_weight_norm
If `True` then use weight normalization (works only in Eager mode).
use_data_init
If `True` then use data-dependent initialization (has no effect if `use_weight_norm` is `False`).
high
The maximum value of the input data (255 for an 8-bit image).
low
The minimum value of the input data.
dtype
Data type of the `Distribution`.
name
The name of the `Distribution`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
super(PixelCNN, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=False,
allow_nan_stats=True,
parameters=parameters,
name=name
)
if not tensorshape_util.is_fully_defined(image_shape):
raise ValueError('`image_shape` must be fully defined.')
if conditional_shape is not None and not tensorshape_util.is_fully_defined(conditional_shape):
                raise ValueError('`conditional_shape` must be fully defined.')
if tensorshape_util.rank(image_shape) != 3:
raise ValueError('`image_shape` must have length 3, representing [height, width, channels] dimensions.')
self._high = tf.cast(high, self.dtype)
self._low = tf.cast(low, self.dtype)
self._num_logistic_mix = num_logistic_mix
self.network = _PixelCNNNetwork(
dropout_p=dropout_p,
num_resnet=num_resnet,
num_hierarchies=num_hierarchies,
num_filters=num_filters,
num_logistic_mix=num_logistic_mix,
receptive_field_dims=receptive_field_dims,
resnet_activation=resnet_activation,
l2_weight=l2_weight,
use_weight_norm=use_weight_norm,
use_data_init=use_data_init,
dtype=dtype
)
image_input_shape = tensorshape_util.concatenate([None], image_shape)
if conditional_shape is None:
input_shape = image_input_shape
else:
conditional_input_shape = tensorshape_util.concatenate([None], conditional_shape)
input_shape = [image_input_shape, conditional_input_shape]
self.image_shape = image_shape
self.conditional_shape = conditional_shape
self.network.build(input_shape)
def _make_mixture_dist(self, component_logits, locs, scales, return_per_feature: bool = False):
"""Builds a mixture of quantized logistic distributions.
Parameters
----------
component_logits
4D `Tensor` of logits for the Categorical distribution
over Quantized Logistic mixture components. Dimensions are `[batch_size,
height, width, num_logistic_mix]`.
locs
4D `Tensor` of location parameters for the Quantized Logistic
mixture components. Dimensions are `[batch_size, height, width,
num_logistic_mix, num_channels]`.
scales
            4D `Tensor` of scale parameters for the Quantized Logistic
mixture components. Dimensions are `[batch_size, height, width,
num_logistic_mix, num_channels]`.
return_per_feature
If True, return per pixel level log prob.
Returns
-------
dist
A quantized logistic mixture `tfp.distribution` over the input data.
"""
mixture_distribution = categorical.Categorical(logits=component_logits)
# Convert distribution parameters for pixel values in
# `[self._low, self._high]` for use with `QuantizedDistribution`
locs = self._low + 0.5 * (self._high - self._low) * (locs + 1.)
scales *= 0.5 * (self._high - self._low)
logistic_dist = quantized_distribution.QuantizedDistribution(
distribution=transformed_distribution.TransformedDistribution(
distribution=logistic.Logistic(loc=locs, scale=scales),
bijector=Shift(shift=tf.cast(-0.5, self.dtype))),
low=self._low, high=self._high)
# mixture with logistics for the loc and scale on each pixel for each component
dist = mixture_same_family.MixtureSameFamily(
mixture_distribution=mixture_distribution,
components_distribution=independent.Independent(logistic_dist, reinterpreted_batch_ndims=1))
if return_per_feature:
return dist
else:
return independent.Independent(dist, reinterpreted_batch_ndims=2)
def _log_prob(self, value, conditional_input=None, training=None, return_per_feature=False):
"""Log probability function with optional conditional input.
Calculates the log probability of a batch of data under the modeled
distribution (or conditional distribution, if conditional input is
provided).
Parameters
----------
value
`Tensor` or Numpy array of image data. May have leading batch
dimension(s), which must broadcast to the leading batch dimensions of
`conditional_input`.
conditional_input
`Tensor` on which to condition the distribution (e.g.
class labels), or `None`. May have leading batch dimension(s), which
must broadcast to the leading batch dimensions of `value`.
training
`bool` or `None`. If `bool`, it controls the dropout layer,
where `True` implies dropout is active. If `None`, it defaults to
`tf.keras.backend.learning_phase()`.
return_per_feature
`bool`. If True, return per pixel level log prob.
Returns
-------
log_prob_values: `Tensor`.
"""
# Determine the batch shape of the input images
image_batch_shape = prefer_static.shape(value)[:-3]
# Broadcast `value` and `conditional_input` to the same batch_shape
if conditional_input is None:
image_batch_and_conditional_shape = image_batch_shape
else:
conditional_input = tf.convert_to_tensor(conditional_input)
conditional_input_shape = prefer_static.shape(conditional_input)
conditional_batch_rank = (prefer_static.rank(conditional_input) -
tensorshape_util.rank(self.conditional_shape))
conditional_batch_shape = conditional_input_shape[:conditional_batch_rank]
image_batch_and_conditional_shape = prefer_static.broadcast_shape(
image_batch_shape, conditional_batch_shape)
conditional_input = tf.broadcast_to(
conditional_input,
prefer_static.concat([image_batch_and_conditional_shape, self.conditional_shape], axis=0))
value = tf.broadcast_to(value, prefer_static.concat(
[image_batch_and_conditional_shape, self.event_shape], axis=0))
# Flatten batch dimension for input to Keras model
conditional_input = tf.reshape(
conditional_input,
prefer_static.concat([(-1,), self.conditional_shape], axis=0))
value = tf.reshape(value, prefer_static.concat([(-1,), self.event_shape], axis=0))
transformed_value = (2. * (value - self._low) / (self._high - self._low)) - 1.
inputs = transformed_value if conditional_input is None else [transformed_value, conditional_input]
params = self.network(inputs, training=training)
num_channels = self.event_shape[-1]
if num_channels == 1:
component_logits, locs, scales = params
else:
# If there is more than one channel, we create a linear autoregressive
# dependency among the location parameters of the channels of a single
# pixel (the scale parameters within a pixel are independent). For a pixel
# with R/G/B channels, the `r`, `g`, and `b` saturation values are
# distributed as:
#
# r ~ Logistic(loc_r, scale_r)
# g ~ Logistic(coef_rg * r + loc_g, scale_g)
# b ~ Logistic(coef_rb * r + coef_gb * g + loc_b, scale_b)
            # The per-channel coefficients are applied below via split/multiply/concat.
component_logits, locs, scales, coeffs = params
num_coeffs = num_channels * (num_channels - 1) // 2
loc_tensors = tf.split(locs, num_channels, axis=-1)
coef_tensors = tf.split(coeffs, num_coeffs, axis=-1)
channel_tensors = tf.split(value, num_channels, axis=-1)
coef_count = 0
for i in range(num_channels):
channel_tensors[i] = channel_tensors[i][..., tf.newaxis, :]
for j in range(i):
loc_tensors[i] += channel_tensors[j] * coef_tensors[coef_count]
coef_count += 1
locs = tf.concat(loc_tensors, axis=-1)
dist = self._make_mixture_dist(component_logits, locs, scales, return_per_feature=return_per_feature)
log_px = dist.log_prob(value)
if return_per_feature:
return log_px
else:
return tf.reshape(log_px, image_batch_and_conditional_shape)
def _sample_n(self, n, seed=None, conditional_input=None, training=False):
"""Samples from the distribution, with optional conditional input.
Parameters
----------
n
`int`, number of samples desired.
seed
`int`, seed for RNG. Setting a random seed enforces reproducibility
of the samples between sessions (not within a single session).
conditional_input
`Tensor` on which to condition the distribution (e.g.
class labels), or `None`.
training
`bool` or `None`. If `bool`, it controls the dropout layer,
where `True` implies dropout is active. If `None`, it defers to Keras'
handling of train/eval status.
Returns
-------
samples
a `Tensor` of shape `[n, height, width, num_channels]`.
"""
if conditional_input is not None:
conditional_input = tf.convert_to_tensor(conditional_input, dtype=self.dtype)
conditional_event_rank = tensorshape_util.rank(self.conditional_shape)
conditional_input_shape = prefer_static.shape(conditional_input)
conditional_sample_rank = prefer_static.rank(conditional_input) - conditional_event_rank
# If `conditional_input` has no sample dimensions, prepend a sample
# dimension
if conditional_sample_rank == 0:
conditional_input = conditional_input[tf.newaxis, ...]
conditional_sample_rank = 1
# Assert that the conditional event shape in the `PixelCnnNetwork` is the
# same as that implied by `conditional_input`.
conditional_event_shape = conditional_input_shape[conditional_sample_rank:]
with tf.control_dependencies([tf.assert_equal(self.conditional_shape, conditional_event_shape)]):
conditional_sample_shape = conditional_input_shape[:conditional_sample_rank]
repeat = n // prefer_static.reduce_prod(conditional_sample_shape)
h = tf.reshape(conditional_input, prefer_static.concat([(-1,), self.conditional_shape], axis=0))
h = tf.tile(h, prefer_static.pad([repeat], paddings=[[0, conditional_event_rank]], constant_values=1))
samples_0 = tf.random.uniform(
prefer_static.concat([(n,), self.event_shape], axis=0),
minval=-1., maxval=1., dtype=self.dtype, seed=seed)
inputs = samples_0 if conditional_input is None else [samples_0, h]
params_0 = self.network(inputs, training=training)
samples_0 = self._sample_channels(*params_0, seed=seed)
image_height, image_width, _ = tensorshape_util.as_list(self.event_shape)
def loop_body(index, samples):
"""Loop for iterative pixel sampling.
Parameters
----------
index
0D `Tensor` of type `int32`. Index of the current pixel.
samples
4D `Tensor`. Images with pixels sampled in raster order, up to
pixel `[index]`, with dimensions `[batch_size, height, width,
num_channels]`.
Returns
-------
samples
4D `Tensor`. Images with pixels sampled in raster order, up to \
and including pixel `[index]`, with dimensions `[batch_size, height, \
width, num_channels]`.
"""
inputs = samples if conditional_input is None else [samples, h]
params = self.network(inputs, training=training)
samples_new = self._sample_channels(*params, seed=seed)
# Update the current pixel
samples = tf.transpose(samples, [1, 2, 3, 0])
samples_new = tf.transpose(samples_new, [1, 2, 3, 0])
row, col = index // image_width, index % image_width
updates = samples_new[row, col, ...][tf.newaxis, ...]
samples = tf.tensor_scatter_nd_update(samples, [[row, col]], updates)
samples = tf.transpose(samples, [3, 0, 1, 2])
return index + 1, samples
index0 = tf.zeros([], dtype=tf.int32)
# Construct the while loop for sampling
total_pixels = image_height * image_width
loop_cond = lambda ind, _: tf.less(ind, total_pixels) # noqa: E731
init_vars = (index0, samples_0)
_, samples = tf.while_loop(loop_cond, loop_body, init_vars, parallel_iterations=1)
transformed_samples = (self._low + 0.5 * (self._high - self._low) * (samples + 1.))
return tf.round(transformed_samples)
def _sample_channels(self, component_logits, locs, scales, coeffs=None, seed=None):
"""Sample a single pixel-iteration and apply channel conditioning.
Parameters
----------
component_logits
4D `Tensor` of logits for the Categorical distribution
over Quantized Logistic mixture components. Dimensions are `[batch_size,
height, width, num_logistic_mix]`.
locs
4D `Tensor` of location parameters for the Quantized Logistic
mixture components. Dimensions are `[batch_size, height, width,
num_logistic_mix, num_channels]`.
scales
            4D `Tensor` of scale parameters for the Quantized Logistic
mixture components. Dimensions are `[batch_size, height, width,
num_logistic_mix, num_channels]`.
coeffs
4D `Tensor` of coefficients for the linear dependence among color
channels, or `None` if there is only one channel. Dimensions are
`[batch_size, height, width, num_logistic_mix, num_coeffs]`, where
`num_coeffs = num_channels * (num_channels - 1) // 2`.
seed
`int`, random seed.
Returns
-------
samples
4D `Tensor` of sampled image data with autoregression among \
channels. Dimensions are `[batch_size, height, width, num_channels]`.
"""
num_channels = self.event_shape[-1]
# sample mixture components once for the entire pixel
component_dist = categorical.Categorical(logits=component_logits)
mask = tf.one_hot(indices=component_dist.sample(seed=seed), depth=self._num_logistic_mix)
mask = tf.cast(mask[..., tf.newaxis], self.dtype)
# apply mixture component mask and separate out RGB parameters
masked_locs = tf.reduce_sum(locs * mask, axis=-2)
loc_tensors = tf.split(masked_locs, num_channels, axis=-1)
masked_scales = tf.reduce_sum(scales * mask, axis=-2)
scale_tensors = tf.split(masked_scales, num_channels, axis=-1)
if coeffs is not None:
num_coeffs = num_channels * (num_channels - 1) // 2
masked_coeffs = tf.reduce_sum(coeffs * mask, axis=-2)
coef_tensors = tf.split(masked_coeffs, num_coeffs, axis=-1)
channel_samples = []
coef_count = 0
for i in range(num_channels):
loc = loc_tensors[i]
for c in channel_samples:
loc += c * coef_tensors[coef_count]
coef_count += 1
logistic_samp = logistic.Logistic(loc=loc, scale=scale_tensors[i]).sample(seed=seed)
logistic_samp = tf.clip_by_value(logistic_samp, -1., 1.)
channel_samples.append(logistic_samp)
return tf.concat(channel_samples, axis=-1)
def _batch_shape(self):
return tf.TensorShape([])
def _event_shape(self):
return tf.TensorShape(self.image_shape)
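# --- Editor's usage sketch (not part of the original file) -------------------
# Build a small unconditional PixelCNN over 8x8 grayscale images and query it.
# All sizes are illustrative assumptions chosen to keep the network tiny.
def _pixelcnn_usage_example():
    dist = PixelCNN(image_shape=(8, 8, 1), num_resnet=1, num_hierarchies=1,
                    num_filters=32, num_logistic_mix=2, dropout_p=0.3)
    images = tf.round(tf.random.uniform([4, 8, 8, 1], maxval=255.))
    log_prob = dist.log_prob(images)  # shape [4]
    samples = dist.sample(2)          # shape [2, 8, 8, 1], values in [0, 255]
    return log_prob, samples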
class _PixelCNNNetwork(tf.keras.layers.Layer):
"""Keras `Layer` to parameterize a Pixel CNN++ distribution.
This is a Keras implementation of the Pixel CNN++ network, as described in
Salimans et al. (2017)[1] and van den Oord et al. (2016)[2].
(https://github.com/openai/pixel-cnn).
#### References
[1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
PixelCNN++: Improving the PixelCNN with Discretized Logistic Mixture
Likelihood and Other Modifications. In _International Conference on
Learning Representations_, 2017.
https://pdfs.semanticscholar.org/9e90/6792f67cbdda7b7777b69284a81044857656.pdf
Additional details at https://github.com/openai/pixel-cnn
[2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _30th Conference on Neural Information Processing
Systems_, 2016.
https://papers.nips.cc/paper/6527-conditional-image-generation-with-pixelcnn-decoders.pdf.
"""
def __init__(self,
dropout_p: float = 0.5,
num_resnet: int = 5,
num_hierarchies: int = 3,
num_filters: int = 160,
num_logistic_mix: int = 10,
receptive_field_dims: tuple = (3, 3),
resnet_activation: str = 'concat_elu',
l2_weight: float = 0.,
use_weight_norm: bool = True,
use_data_init: bool = True,
dtype=tf.float32) -> None:
"""Initialize the neural network for the Pixel CNN++ distribution.
Parameters
----------
dropout_p
`float`, the dropout probability. Should be between 0 and 1.
num_resnet
`int`, the number of layers (shown in Figure 2 of [2]) within
each highest-level block of Figure 2 of [1].
num_hierarchies
            `int`, the number of highest-level blocks (separated by
expansions/contractions of dimensions in Figure 2 of [1].)
num_filters
`int`, the number of convolutional filters.
num_logistic_mix
`int`, number of components in the logistic mixture
distribution.
receptive_field_dims
`tuple`, height and width in pixels of the receptive
field of the convolutional layers above and to the left of a given
pixel. The width (second element of the tuple) should be odd. Figure 1
(middle) of [2] shows a receptive field of (3, 5) (the row containing
the current pixel is included in the height). The default of (3, 3) was
used to produce the results in [1].
resnet_activation
`string`, the type of activation to use in the resnet
blocks. May be 'concat_elu', 'elu', or 'relu'.
l2_weight
`float`, the L2 regularization weight.
use_weight_norm
`bool`, if `True` then use weight normalization.
use_data_init
`bool`, if `True` then use data-dependent initialization
(has no effect if `use_weight_norm` is `False`).
dtype
Data type of the layer.
"""
super(_PixelCNNNetwork, self).__init__(dtype=dtype)
self._dropout_p = dropout_p
self._num_resnet = num_resnet
self._num_hierarchies = num_hierarchies
self._num_filters = num_filters
self._num_logistic_mix = num_logistic_mix
self._receptive_field_dims = receptive_field_dims # first set desired receptive field, then infer kernel
self._resnet_activation = resnet_activation
self._l2_weight = l2_weight
if use_weight_norm:
def layer_wrapper(layer):
def wrapped_layer(*args, **kwargs):
return WeightNorm(layer(*args, **kwargs), data_init=use_data_init)
return wrapped_layer
self._layer_wrapper = layer_wrapper
else:
self._layer_wrapper = lambda layer: layer
def build(self, input_shape):
dtype = self.dtype
if len(input_shape) == 2:
batch_image_shape, batch_conditional_shape = input_shape
conditional_input = tf.keras.layers.Input(shape=batch_conditional_shape[1:], dtype=dtype)
else:
batch_image_shape = input_shape
conditional_input = None
image_shape = batch_image_shape[1:]
image_input = tf.keras.layers.Input(shape=image_shape, dtype=dtype)
if self._resnet_activation == 'concat_elu':
activation = tf.keras.layers.Lambda(lambda x: tf.nn.elu(tf.concat([x, -x], axis=-1)), dtype=dtype)
else:
activation = tf.keras.activations.get(self._resnet_activation)
# Define layers with default inputs and layer wrapper applied
Conv2D = functools.partial( # pylint:disable=invalid-name
self._layer_wrapper(tf.keras.layers.Convolution2D),
filters=self._num_filters,
padding='same',
kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
dtype=dtype)
Dense = functools.partial( # pylint:disable=invalid-name
self._layer_wrapper(tf.keras.layers.Dense),
kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
dtype=dtype)
Conv2DTranspose = functools.partial( # pylint:disable=invalid-name
self._layer_wrapper(tf.keras.layers.Conv2DTranspose),
filters=self._num_filters,
padding='same',
strides=(2, 2),
kernel_regularizer=tf.keras.regularizers.l2(self._l2_weight),
dtype=dtype)
rows, cols = self._receptive_field_dims
# Define the dimensions of the valid (unmasked) areas of the layer kernels
# for stride 1 convolutions in the internal layers.
kernel_valid_dims = {'vertical': (rows - 1, cols), # vertical stack
'horizontal': (2, cols // 2 + 1)} # horizontal stack
# Define the size of the kernel necessary to center the current pixel
# correctly for stride 1 convolutions in the internal layers.
kernel_sizes = {'vertical': (2 * rows - 3, cols), 'horizontal': (3, cols)}
# Make the kernel constraint functions for stride 1 convolutions in internal
# layers.
kernel_constraints = {
k: _make_kernel_constraint(kernel_sizes[k], (0, v[0]), (0, v[1]))
for k, v in kernel_valid_dims.items()}
# Build the initial vertical stack/horizontal stack convolutional layers,
# as shown in Figure 1 of [2]. The receptive field of the initial vertical
# stack layer is a rectangular area centered above the current pixel.
vertical_stack_init = Conv2D(
kernel_size=(2 * rows - 1, cols),
kernel_constraint=_make_kernel_constraint((2 * rows - 1, cols), (0, rows - 1), (0, cols)))(image_input)
# In Figure 1 [2], the receptive field of the horizontal stack is
# illustrated as the pixels in the same row and to the left of the current
# pixel. [1] increases the height of this receptive field from one pixel to
# two (`horizontal_stack_left`) and additionally includes a subset of the
# row of pixels centered above the current pixel (`horizontal_stack_up`).
horizontal_stack_up = Conv2D(
kernel_size=(3, cols),
kernel_constraint=_make_kernel_constraint((3, cols), (0, 1), (0, cols)))(image_input)
horizontal_stack_left = Conv2D(
kernel_size=(3, cols),
kernel_constraint=_make_kernel_constraint((3, cols), (0, 2), (0, cols // 2)))(image_input)
horizontal_stack_init = tf.keras.layers.add([horizontal_stack_up, horizontal_stack_left], dtype=dtype)
layer_stacks = {
'vertical': [vertical_stack_init],
'horizontal': [horizontal_stack_init]
}
# Build the downward pass of the U-net (left-hand half of Figure 2 of [1]).
# Each `i` iteration builds one of the highest-level blocks (identified as
# 'Sequence of 6 layers' in the figure, consisting of `num_resnet=5` stride-
# 1 layers, and one stride-2 layer that contracts the height/width
# dimensions). The `_` iterations build the stride 1 layers. The layers of
# the downward pass are stored in lists, since we'll later need them to make
# skip-connections to layers in the upward pass of the U-net (the skip-
# connections are represented by curved lines in Figure 2 [1]).
for i in range(self._num_hierarchies):
for _ in range(self._num_resnet):
# Build a layer shown in Figure 2 of [2]. The 'vertical' iteration
# builds the layers in the left half of the figure, and the 'horizontal'
# iteration builds the layers in the right half.
for stack in ['vertical', 'horizontal']:
input_x = layer_stacks[stack][-1]
x = activation(input_x)
x = Conv2D(kernel_size=kernel_sizes[stack],
kernel_constraint=kernel_constraints[stack])(x)
# Add the vertical-stack layer to the horizontal-stack layer
if stack == 'horizontal':
h = activation(layer_stacks['vertical'][-1])
h = Dense(self._num_filters)(h)
x = tf.keras.layers.add([h, x], dtype=dtype)
x = activation(x)
x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)
x = Conv2D(filters=2*self._num_filters,
kernel_size=kernel_sizes[stack],
kernel_constraint=kernel_constraints[stack])(x)
if conditional_input is not None:
h_projection = _build_and_apply_h_projection(conditional_input,
self._num_filters, dtype=dtype)
x = tf.keras.layers.add([x, h_projection], dtype=dtype)
x = _apply_sigmoid_gating(x)
# Add a residual connection from the layer's input.
out = tf.keras.layers.add([input_x, x], dtype=dtype)
layer_stacks[stack].append(out)
if i < self._num_hierarchies - 1:
# Build convolutional layers that contract the height/width dimensions
# on the downward pass between each set of layers (e.g. contracting from
# 32x32 to 16x16 in Figure 2 of [1]).
for stack in ['vertical', 'horizontal']:
# Define kernel dimensions/masking to maintain the autoregressive property.
x = layer_stacks[stack][-1]
h, w = kernel_valid_dims[stack]
kernel_height = 2 * h
if stack == 'vertical':
kernel_width = w + 1
else:
kernel_width = 2 * w
kernel_size = (kernel_height, kernel_width)
kernel_constraint = _make_kernel_constraint(kernel_size, (0, h), (0, w))
x = Conv2D(strides=(2, 2), kernel_size=kernel_size,
kernel_constraint=kernel_constraint)(x)
layer_stacks[stack].append(x)
# Upward pass of the U-net (right-hand half of Figure 2 of [1]). We stored
# the layers of the downward pass in a list, in order to access them to make
# skip-connections to the upward pass. For the upward pass, we need to keep
# track of only the current layer, so we maintain a reference to the
# current layer of the horizontal/vertical stack in the `upward_pass` dict.
# The upward pass begins with the last layer of the downward pass.
upward_pass = {key: stack.pop() for key, stack in layer_stacks.items()}
# As with the downward pass, each `i` iteration builds a highest level block
# in Figure 2 [1], and the `_` iterations build individual layers within the
# block.
for i in range(self._num_hierarchies):
num_resnet = self._num_resnet if i == 0 else self._num_resnet + 1
for _ in range(num_resnet):
# Build a layer as shown in Figure 2 of [2], with a skip-connection
# from the symmetric layer in the downward pass.
for stack in ['vertical', 'horizontal']:
input_x = upward_pass[stack]
x_symmetric = layer_stacks[stack].pop()
x = activation(input_x)
x = Conv2D(kernel_size=kernel_sizes[stack],
kernel_constraint=kernel_constraints[stack])(x)
# Include the vertical-stack layer of the upward pass in the layers
# to be added to the horizontal layer.
if stack == 'horizontal':
x_symmetric = tf.keras.layers.Concatenate(axis=-1,
dtype=dtype)([upward_pass['vertical'],
x_symmetric])
# Add a skip-connection from the symmetric layer in the downward
# pass to the layer `x` in the upward pass.
h = activation(x_symmetric)
h = Dense(self._num_filters)(h)
x = tf.keras.layers.add([h, x], dtype=dtype)
x = activation(x)
x = tf.keras.layers.Dropout(self._dropout_p, dtype=dtype)(x)
x = Conv2D(filters=2*self._num_filters,
kernel_size=kernel_sizes[stack],
kernel_constraint=kernel_constraints[stack])(x)
if conditional_input is not None:
h_projection = _build_and_apply_h_projection(conditional_input, self._num_filters, dtype=dtype)
x = tf.keras.layers.add([x, h_projection], dtype=dtype)
x = _apply_sigmoid_gating(x)
upward_pass[stack] = tf.keras.layers.add([input_x, x], dtype=dtype)
# Define deconvolutional layers that expand height/width dimensions on the
# upward pass (e.g. expanding from 8x8 to 16x16 in Figure 2 of [1]), with
# the correct kernel dimensions/masking to maintain the autoregressive
# property.
if i < self._num_hierarchies - 1:
for stack in ['vertical', 'horizontal']:
h, w = kernel_valid_dims[stack]
kernel_height = 2 * h - 2
if stack == 'vertical':
kernel_width = w + 1
kernel_constraint = _make_kernel_constraint(
(kernel_height, kernel_width), (h - 2, kernel_height), (0, w))
else:
kernel_width = 2 * w - 2
kernel_constraint = _make_kernel_constraint(
(kernel_height, kernel_width), (h - 2, kernel_height),
(w - 2, kernel_width))
x = upward_pass[stack]
x = Conv2DTranspose(kernel_size=(kernel_height, kernel_width),
kernel_constraint=kernel_constraint)(x)
upward_pass[stack] = x
x_out = tf.keras.layers.ELU(dtype=dtype)(upward_pass['horizontal'])
# Build final Dense/Reshape layers to output the correct number of
# parameters per pixel.
num_channels = tensorshape_util.as_list(image_shape)[-1]
num_coeffs = num_channels * (num_channels - 1) // 2 # alpha, beta, gamma in eq.3 of paper
num_out = num_channels * 2 + num_coeffs + 1 # mu, s + alpha, beta, gamma + 1 (mixture weight)
num_out_total = num_out * self._num_logistic_mix
params = Dense(num_out_total)(x_out)
params = tf.reshape(params, prefer_static.concat( # [-1,H,W,nb mixtures, params per mixture]
[[-1], image_shape[:-1], [self._num_logistic_mix, num_out]], axis=0))
# If there is one color channel, split the parameters into a list of three
# output `Tensor`s: (1) component logits for the Quantized Logistic mixture
# distribution, (2) location parameters for each component, and (3) scale
# parameters for each component. If there is more than one color channel,
# return a fourth `Tensor` for the coefficients for the linear dependence
# among color channels (e.g. alpha, beta, gamma).
# [logits, mu, s, linear dependence]
splits = 3 if num_channels == 1 else [1, num_channels, num_channels, num_coeffs]
outputs = tf.split(params, splits, axis=-1)
# Squeeze singleton dimension from component logits
outputs[0] = tf.squeeze(outputs[0], axis=-1)
# Ensure scales are positive and do not collapse to near-zero
outputs[2] = tf.nn.softplus(outputs[2]) + tf.cast(tf.exp(-7.), self.dtype)
inputs = image_input if conditional_input is None else [image_input, conditional_input]
self._network = tf.keras.Model(inputs=inputs, outputs=outputs)
super(_PixelCNNNetwork, self).build(input_shape)
def call(self, inputs, training=None):
"""Call the Pixel CNN network model.
Parameters
----------
inputs
4D `Tensor` of image data with dimensions [batch size, height,
width, channels] or a 2-element `list`. If `list`, the first element is
the 4D image `Tensor` and the second element is a `Tensor` with
conditional input data (e.g. VAE encodings or class labels) with the
same leading batch dimension as the image `Tensor`.
training
`bool` or `None`. If `bool`, it controls the dropout layer,
            where `True` implies dropout is active. If `None`, it defaults to
            `tf.keras.backend.learning_phase()`.
Returns
-------
outputs
a 3- or 4-element `list` of `Tensor`s in the following order: \
component_logits: 4D `Tensor` of logits for the Categorical distribution \
over Quantized Logistic mixture components. Dimensions are \
`[batch_size, height, width, num_logistic_mix]`.
locs
4D `Tensor` of location parameters for the Quantized Logistic \
mixture components. Dimensions are `[batch_size, height, width, \
num_logistic_mix, num_channels]`.
scales
            4D `Tensor` of scale parameters for the Quantized Logistic \
mixture components. Dimensions are `[batch_size, height, width, \
num_logistic_mix, num_channels]`.
coeffs
4D `Tensor` of coefficients for the linear dependence among \
color channels, included only if the image has more than one channel. \
Dimensions are `[batch_size, height, width, num_logistic_mix, \
num_coeffs]`, where `num_coeffs = num_channels * (num_channels - 1) // 2`.
"""
return self._network(inputs, training=training)
def _make_kernel_constraint(kernel_size, valid_rows, valid_columns):
"""Make the masking function for layer kernels."""
mask = np.zeros(kernel_size)
lower, upper = valid_rows
left, right = valid_columns
mask[lower:upper, left:right] = 1.
mask = mask[:, :, np.newaxis, np.newaxis]
return lambda x: x * mask
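# --- Editor's usage sketch (not part of the original file) -------------------
# The returned constraint zeroes kernel entries outside the valid rows/columns,
# which is what enforces the causal (masked) receptive field. Sizes assumed.
def _kernel_constraint_example():
    constraint = _make_kernel_constraint((3, 3), (0, 2), (0, 3))
    kernel = tf.ones([3, 3, 1, 1])  # [rows, cols, in_channels, out_channels]
    masked = constraint(kernel)     # the bottom kernel row is masked to zero
    return tf.squeeze(masked)       # [[1, 1, 1], [1, 1, 1], [0, 0, 0]]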
def _build_and_apply_h_projection(h, num_filters, dtype):
"""Project the conditional input."""
h = tf.keras.layers.Flatten(dtype=dtype)(h)
h_projection = tf.keras.layers.Dense(2*num_filters, kernel_initializer='random_normal', dtype=dtype)(h)
return h_projection[..., tf.newaxis, tf.newaxis, :]
def _apply_sigmoid_gating(x):
"""Apply the sigmoid gating in Figure 2 of [2]."""
activation_tensor, gate_tensor = tf.split(x, 2, axis=-1)
sigmoid_gate = tf.sigmoid(gate_tensor)
return tf.keras.layers.multiply([sigmoid_gate, activation_tensor], dtype=x.dtype)
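# --- Editor's usage sketch (not part of the original file) -------------------
# The gating splits the channel dimension in half and multiplies activations
# by sigmoid gates, halving the number of channels. The shape is an assumption.
def _sigmoid_gating_example():
    x = tf.random.normal([1, 4, 4, 2 * 16])  # activations stacked with gates
    gated = _apply_sigmoid_gating(x)          # shape [1, 4, 4, 16]
    return gated.shape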
| 48,470 | 45.383732 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/losses.py
|
from typing import Optional
import tensorflow as tf
from tensorflow.keras.layers import Flatten
from tensorflow.keras.losses import kld, categorical_crossentropy
import tensorflow_probability as tfp
from alibi_detect.models.tensorflow.gmm import gmm_params, gmm_energy
def elbo(y_true: tf.Tensor,
y_pred: tf.Tensor,
cov_full: Optional[tf.Tensor] = None,
cov_diag: Optional[tf.Tensor] = None,
sim: Optional[float] = None
) -> tf.Tensor:
"""
Compute ELBO loss. The covariance matrix can be specified by passing the full covariance matrix, the matrix
diagonal, or a scale identity multiplier. Only one of these should be specified. If none are specified, the
identity matrix is used.
Parameters
----------
y_true
Labels.
y_pred
Predictions.
cov_full
Full covariance matrix.
cov_diag
Diagonal (variance) of covariance matrix.
sim
Scale identity multiplier.
Returns
-------
ELBO loss value.
Example
-------
>>> import tensorflow as tf
>>> from alibi_detect.models.tensorflow.losses import elbo
>>> y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
>>> y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
>>> # Specifying scale identity multiplier
>>> elbo(y_true, y_pred, sim=1.0)
>>> # Specifying covariance matrix diagonal
>>> elbo(y_true, y_pred, cov_diag=tf.ones(2))
>>> # Specifying full covariance matrix
>>> elbo(y_true, y_pred, cov_full=tf.eye(2))
"""
if len([x for x in [cov_full, cov_diag, sim] if x is not None]) > 1:
raise ValueError('Only one of cov_full, cov_diag or sim should be specified.')
y_pred_flat = Flatten()(y_pred)
if isinstance(cov_full, tf.Tensor):
y_mn = tfp.distributions.MultivariateNormalFullCovariance(y_pred_flat,
covariance_matrix=cov_full)
else:
if sim:
cov_diag = sim * tf.ones(y_pred_flat.shape[-1])
y_mn = tfp.distributions.MultivariateNormalDiag(y_pred_flat,
scale_diag=cov_diag)
loss = -tf.reduce_mean(y_mn.log_prob(Flatten()(y_true)))
return loss
def loss_aegmm(x_true: tf.Tensor,
x_pred: tf.Tensor,
z: tf.Tensor,
gamma: tf.Tensor,
w_energy: float = .1,
w_cov_diag: float = .005
) -> tf.Tensor:
"""
Loss function used for OutlierAEGMM.
Parameters
----------
x_true
Batch of instances.
x_pred
Batch of reconstructed instances by the autoencoder.
z
Latent space values.
gamma
Membership prediction for mixture model components.
w_energy
Weight on sample energy loss term.
w_cov_diag
Weight on covariance regularizing loss term.
Returns
-------
Loss value.
"""
recon_loss = tf.reduce_mean((x_true - x_pred) ** 2)
phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
sample_energy, cov_diag = gmm_energy(z, phi, mu, cov, L, log_det_cov, return_mean=True)
loss = recon_loss + w_energy * sample_energy + w_cov_diag * cov_diag
return loss
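# --- Editor's usage sketch (not part of the original file) -------------------
# Illustrative call with random stand-ins for the AEGMM outputs; the shapes
# (batch 16, 10 features, 4 latent dims, 3 mixture components) are assumptions.
def _loss_aegmm_example():
    x = tf.random.normal([16, 10])
    x_recon = x + 0.05 * tf.random.normal([16, 10])
    z = tf.random.normal([16, 4])
    gamma = tf.nn.softmax(tf.random.normal([16, 3]), axis=-1)
    return loss_aegmm(x, x_recon, z, gamma, w_energy=.1, w_cov_diag=.005)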
def loss_vaegmm(x_true: tf.Tensor,
x_pred: tf.Tensor,
z: tf.Tensor,
gamma: tf.Tensor,
w_recon: float = 1e-7,
w_energy: float = .1,
w_cov_diag: float = .005,
cov_full: tf.Tensor = None,
cov_diag: tf.Tensor = None,
sim: float = .05
) -> tf.Tensor:
"""
Loss function used for OutlierVAEGMM.
Parameters
----------
x_true
Batch of instances.
x_pred
Batch of reconstructed instances by the variational autoencoder.
z
Latent space values.
gamma
Membership prediction for mixture model components.
w_recon
Weight on elbo loss term.
w_energy
Weight on sample energy loss term.
w_cov_diag
Weight on covariance regularizing loss term.
cov_full
Full covariance matrix.
cov_diag
Diagonal (variance) of covariance matrix.
sim
Scale identity multiplier.
Returns
-------
Loss value.
"""
recon_loss = elbo(x_true, x_pred, cov_full=cov_full, cov_diag=cov_diag, sim=sim)
phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
sample_energy, cov_diag = gmm_energy(z, phi, mu, cov, L, log_det_cov)
loss = w_recon * recon_loss + w_energy * sample_energy + w_cov_diag * cov_diag
return loss
def loss_adv_ae(x_true: tf.Tensor,
x_pred: tf.Tensor,
model: tf.keras.Model = None,
model_hl: list = None,
w_model: float = 1.,
w_recon: float = 0.,
w_model_hl: list = None,
temperature: float = 1.
) -> tf.Tensor:
"""
Loss function used for AdversarialAE.
Parameters
----------
x_true
Batch of instances.
x_pred
Batch of reconstructed instances by the autoencoder.
model
A trained tf.keras model with frozen layers (layers.trainable = False).
model_hl
List with tf.keras models used to extract feature maps and make predictions on hidden layers.
w_model
Weight on model prediction loss term.
w_recon
Weight on MSE reconstruction error loss term.
w_model_hl
Weights assigned to the loss of each model in model_hl.
temperature
Temperature used for model prediction scaling.
Temperature <1 sharpens the prediction probability distribution.
Returns
-------
Loss value.
"""
y_true = model(x_true)
y_pred = model(x_pred)
# apply temperature scaling
if temperature != 1.:
y_true = y_true ** (1 / temperature)
y_true = y_true / tf.reshape(tf.reduce_sum(y_true, axis=-1), (-1, 1))
# compute K-L divergence loss
loss_kld = kld(y_true, y_pred)
std_kld = tf.math.reduce_std(loss_kld)
loss = tf.reduce_mean(loss_kld)
# add loss from optional K-L divergences extracted from hidden layers
if isinstance(model_hl, list):
if w_model_hl is None:
w_model_hl = list(tf.ones(len(model_hl)))
for m, w in zip(model_hl, w_model_hl):
h_true = m(x_true)
h_pred = m(x_pred)
loss_kld_hl = tf.reduce_mean(kld(h_true, h_pred))
loss += tf.constant(w) * loss_kld_hl
loss *= w_model
# add optional reconstruction loss
if w_recon > 0.:
loss_recon = (x_true - x_pred) ** 2
std_recon = tf.math.reduce_std(loss_recon)
w_scale = std_kld / (std_recon + 1e-10)
loss_recon = w_recon * w_scale * tf.reduce_mean(loss_recon)
loss += loss_recon
return loss
else:
return loss
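# --- Editor's usage sketch (not part of the original file) -------------------
# `loss_adv_ae` matches the frozen classifier's predictions on original vs.
# reconstructed instances. The tiny softmax net below is an illustrative
# stand-in for a trained model, not something defined in this module.
def _loss_adv_ae_example():
    clf = tf.keras.Sequential([tf.keras.layers.Dense(5, activation='softmax')])
    clf.trainable = False
    x = tf.random.normal([8, 12])
    x_recon = x + 0.1 * tf.random.normal([8, 12])
    return loss_adv_ae(x, x_recon, model=clf, w_model=1., w_recon=0.5, temperature=0.5)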
def loss_distillation(x_true: tf.Tensor,
y_pred: tf.Tensor,
model: tf.keras.Model = None,
loss_type: str = 'kld',
temperature: float = 1.,
) -> tf.Tensor:
"""
Loss function used for Model Distillation.
Parameters
----------
x_true
Batch of data points.
y_pred
Batch of prediction from the distilled model.
model
tf.keras model.
loss_type
        Type of loss for distillation. Supported: 'kld', 'xent'.
temperature
Temperature used for model prediction scaling.
Temperature <1 sharpens the prediction probability distribution.
Returns
-------
Loss value.
"""
y_true = model(x_true)
# apply temperature scaling
if temperature != 1.:
y_true = y_true ** (1 / temperature)
y_true = y_true / tf.reshape(tf.reduce_sum(y_true, axis=-1), (-1, 1))
if loss_type == 'kld':
loss_dist = kld(y_true, y_pred)
elif loss_type == 'xent':
loss_dist = categorical_crossentropy(y_true, y_pred, from_logits=False)
else:
raise NotImplementedError
# compute K-L divergence loss
loss = tf.reduce_mean(loss_dist)
return loss
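# --- Editor's usage sketch (not part of the original file) -------------------
# The student's predictions are pulled towards the (temperature-scaled)
# predictions of the original model. Both tensors/networks below are random
# stand-ins with assumed sizes.
def _loss_distillation_example():
    teacher = tf.keras.Sequential([tf.keras.layers.Dense(5, activation='softmax')])
    x = tf.random.normal([8, 12])
    y_student = tf.nn.softmax(tf.random.normal([8, 5]), axis=-1)
    return loss_distillation(x, y_student, model=teacher, loss_type='kld', temperature=0.5)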
| 8,256 | 29.925094 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/resnet.py
|
# implementation adopted from https://github.com/tensorflow/models
# TODO: proper train-val-test split
import argparse
import numpy as np
import os
from pathlib import Path
import tensorflow as tf
from tensorflow.keras.callbacks import Callback, ModelCheckpoint
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import (Activation, Add, BatchNormalization, Conv2D,
Dense, Input, ZeroPadding2D)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.regularizers import l2
from typing import Callable, Tuple, Union
# parameters specific for CIFAR-10 training
BATCH_NORM_DECAY = 0.997
BATCH_NORM_EPSILON = 1e-5
L2_WEIGHT_DECAY = 2e-4
LR_SCHEDULE = [(0.1, 91), (0.01, 136), (0.001, 182)] # (multiplier, epoch to start) tuples
BASE_LEARNING_RATE = 0.1
HEIGHT, WIDTH, NUM_CHANNELS = 32, 32, 3
def l2_regulariser(l2_regularisation: bool = True):
"""
Apply L2 regularisation to kernel.
Parameters
----------
l2_regularisation
Whether to apply L2 regularisation.
Returns
-------
Kernel regularisation.
"""
return l2(L2_WEIGHT_DECAY) if l2_regularisation else None
def identity_block(x_in: tf.Tensor,
filters: Tuple[int, int],
kernel_size: Union[int, list, Tuple[int]],
stage: int,
block: str,
l2_regularisation: bool = True) -> tf.Tensor:
"""
Identity block in ResNet.
Parameters
----------
x_in
Input Tensor.
filters
Number of filters for each of the 2 conv layers.
kernel_size
Kernel size for the conv layers.
stage
Stage of the block in the ResNet.
block
Block within a stage in the ResNet.
l2_regularisation
Whether to apply L2 regularisation.
Returns
-------
Output Tensor of the identity block.
"""
# name of block
conv_name_base = 'res' + str(stage) + '_' + block + '_branch'
bn_name_base = 'bn' + str(stage) + '_' + block + '_branch'
filters_1, filters_2 = filters
bn_axis = 3 # channels last format
x = Conv2D(
filters_1,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name=conv_name_base + '2a')(x_in)
x = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters_2,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name=conv_name_base + '2b')(x)
x = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(x)
x = Add()([x, x_in])
x = Activation('relu')(x)
return x
def conv_block(x_in: tf.Tensor,
filters: Tuple[int, int],
kernel_size: Union[int, list, Tuple[int]],
stage: int,
block: str,
strides: Tuple[int, int] = (2, 2),
l2_regularisation: bool = True) -> tf.Tensor:
"""
Conv block in ResNet with a parameterised skip connection to reduce the width and height
controlled by the strides.
Parameters
----------
x_in
Input Tensor.
filters
Number of filters for each of the 2 conv layers.
kernel_size
Kernel size for the conv layers.
stage
Stage of the block in the ResNet.
block
Block within a stage in the ResNet.
strides
Stride size applied to reduce the image size.
l2_regularisation
Whether to apply L2 regularisation.
Returns
-------
Output Tensor of the conv block.
"""
# name of block
conv_name_base = 'res' + str(stage) + '_' + block + '_branch'
bn_name_base = 'bn' + str(stage) + '_' + block + '_branch'
filters_1, filters_2 = filters
bn_axis = 3 # channels last format
x = Conv2D(
filters_1,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name=conv_name_base + '2a')(x_in)
x = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters_2,
kernel_size,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name=conv_name_base + '2b')(x)
x = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(x)
shortcut = Conv2D(
filters_2,
(1, 1),
strides=strides,
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name=conv_name_base + '1')(x_in)
shortcut = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def resnet_block(x_in: tf.Tensor,
size: int,
filters: Tuple[int, int],
kernel_size: Union[int, list, Tuple[int]],
stage: int,
strides: Tuple[int, int] = (2, 2),
l2_regularisation: bool = True) -> tf.Tensor:
"""
Block in ResNet combining a conv block with identity blocks.
Parameters
----------
x_in
Input Tensor.
size
The ResNet block consists of 1 conv block and size-1 identity blocks.
filters
Number of filters for each of the conv layers.
kernel_size
Kernel size for the conv layers.
stage
Stage of the block in the ResNet.
strides
Stride size applied to reduce the image size.
l2_regularisation
Whether to apply L2 regularisation.
Returns
-------
Output Tensor of the conv block.
"""
x = conv_block(
x_in,
filters,
kernel_size,
stage,
'block0',
strides=strides,
l2_regularisation=l2_regularisation
)
for i in range(size - 1):
x = identity_block(
x,
filters,
kernel_size,
stage,
f'block{i + 1}',
l2_regularisation=l2_regularisation
)
return x
def resnet(num_blocks: int,
classes: int = 10,
input_shape: Tuple[int, int, int] = (32, 32, 3)) -> tf.keras.Model:
"""
Define ResNet.
Parameters
----------
num_blocks
Number of ResNet blocks.
classes
Number of classification classes.
input_shape
Input shape of an image.
Returns
-------
ResNet as a tf.keras.Model.
"""
bn_axis = 3 # channels last format
l2_regularisation = True
x_in = Input(shape=input_shape)
x = ZeroPadding2D(
padding=(1, 1),
name='conv1_pad')(x_in)
x = Conv2D(
16,
(3, 3),
strides=(1, 1),
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=l2_regulariser(l2_regularisation),
name='conv1')(x)
x = BatchNormalization(
axis=bn_axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name='bn_conv1')(x)
x = Activation('relu')(x)
x = resnet_block(
x_in=x,
size=num_blocks,
filters=(16, 16),
kernel_size=3,
stage=2,
strides=(1, 1),
l2_regularisation=True
)
x = resnet_block(
x_in=x,
size=num_blocks,
filters=(32, 32),
kernel_size=3,
stage=3,
strides=(2, 2),
l2_regularisation=True
)
x = resnet_block(
x_in=x,
size=num_blocks,
filters=(64, 64),
kernel_size=3,
stage=4,
strides=(2, 2),
l2_regularisation=True
)
x = tf.reduce_mean(x, axis=(1, 2)) # take mean across width and height
x_out = Dense(
classes,
activation='softmax',
kernel_initializer=RandomNormal(stddev=.01),
kernel_regularizer=l2(L2_WEIGHT_DECAY),
bias_regularizer=l2(L2_WEIGHT_DECAY),
name='fc10')(x)
model = Model(x_in, x_out, name='resnet')
return model
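# --- Editor's usage sketch (not part of the original file) -------------------
# With num_blocks=5 this builds the classic 6n+2 = 32-layer CIFAR-10 ResNet;
# the forward pass below uses a random batch purely for illustration.
def _resnet_example():
    model = resnet(num_blocks=5, classes=10, input_shape=(32, 32, 3))
    x = tf.random.normal([2, 32, 32, 3])
    probs = model(x, training=False)  # shape (2, 10), each row sums to 1
    return probs.shape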
def learning_rate_schedule(current_epoch: int,
current_batch: int,
batches_per_epoch: int,
batch_size: int) -> float:
"""
Linear learning rate scaling and learning rate decay at specified epochs.
Parameters
----------
current_epoch
Current training epoch.
current_batch
Current batch with current epoch, not used.
batches_per_epoch
Number of batches or steps in an epoch, not used.
batch_size
Batch size.
Returns
-------
Adjusted learning rate.
"""
del current_batch, batches_per_epoch # not used
initial_learning_rate = BASE_LEARNING_RATE * batch_size / 128
learning_rate = initial_learning_rate
for mult, start_epoch in LR_SCHEDULE:
if current_epoch >= start_epoch:
learning_rate = initial_learning_rate * mult
else:
break
return learning_rate
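# --- Editor's usage sketch (not part of the original file) -------------------
# For batch_size=128 the base rate stays at 0.1 until epoch 91, after which
# the LR_SCHEDULE multipliers apply (0.01 from epoch 91, 0.001 from 136,
# 0.0001 from 182).
def _lr_schedule_example():
    lrs = [learning_rate_schedule(epoch, 0, 0, batch_size=128)
           for epoch in (0, 90, 91, 136, 182)]
    return lrs  # approximately [0.1, 0.1, 0.01, 0.001, 0.0001]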
class LearningRateBatchScheduler(Callback):
def __init__(self, schedule: Callable, batch_size: int, steps_per_epoch: int):
"""
Callback to update learning rate on every batch instead of epoch.
Parameters
----------
schedule
Function taking the epoch and batch index as input which returns the new
learning rate as output.
batch_size
Batch size.
steps_per_epoch
Number of batches or steps per epoch.
"""
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
def preprocess_image(x: np.ndarray, is_training: bool = True) -> np.ndarray:
if is_training:
# resize image and add 4 pixels to each side
x = tf.image.resize_with_crop_or_pad(x, HEIGHT + 8, WIDTH + 8)
# randomly crop a [HEIGHT, WIDTH] section of the image
x = tf.image.random_crop(x, [HEIGHT, WIDTH, NUM_CHANNELS])
# randomly flip the image horizontally
x = tf.image.random_flip_left_right(x)
# standardise by image
x = tf.image.per_image_standardization(x).numpy().astype(np.float32)
return x
def scale_by_instance(x: np.ndarray, eps: float = 1e-12) -> np.ndarray:
xmean = x.mean(axis=(1, 2, 3)).reshape(-1, 1, 1, 1)
xstd = x.std(axis=(1, 2, 3)).reshape(-1, 1, 1, 1)
x_scaled = (x - xmean) / (xstd + eps)
return x_scaled
def run(num_blocks: int,
epochs: int,
batch_size: int,
model_dir: Union[str, os.PathLike],
num_classes: int = 10,
input_shape: Tuple[int, int, int] = (32, 32, 3),
validation_freq: int = 10,
verbose: int = 2,
seed: int = 1,
serving: bool = False
) -> None:
# load and preprocess CIFAR-10 data
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()
X_train = X_train.astype('float32')
X_test = scale_by_instance(X_test.astype('float32')) # can already preprocess test data
y_train = y_train.astype('int64').reshape(-1, )
y_test = y_test.astype('int64').reshape(-1, )
# define and compile model
model = resnet(num_blocks, classes=num_classes, input_shape=input_shape)
optimizer = SGD(learning_rate=BASE_LEARNING_RATE, momentum=0.9)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['sparse_categorical_accuracy']
)
# set up callbacks
steps_per_epoch = X_train.shape[0] // batch_size
ckpt_path = Path(model_dir).joinpath('model.h5')
callbacks = [
ModelCheckpoint(
ckpt_path,
monitor='val_sparse_categorical_accuracy',
save_best_only=True,
save_weights_only=False
),
LearningRateBatchScheduler(
schedule=learning_rate_schedule,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch
)
]
# data augmentation and preprocessing
datagen = ImageDataGenerator(preprocessing_function=preprocess_image)
# train
model.fit(
x=datagen.flow(X_train, y_train, batch_size=batch_size, shuffle=True, seed=seed),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks,
validation_freq=validation_freq,
validation_data=(X_test, y_test),
shuffle=True,
verbose=verbose
)
if serving:
tf.saved_model.save(model, model_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train ResNet on CIFAR-10.")
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--model_dir', type=str, default='./model/')
parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--validation_freq', type=int, default=10)
parser.add_argument('--verbose', type=int, default=2)
parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--serving', action='store_true')
args = parser.parse_args()
run(
args.num_blocks,
args.epochs,
args.batch_size,
args.model_dir,
num_classes=args.num_classes,
validation_freq=args.validation_freq,
verbose=args.verbose,
seed=args.seed,
serving=args.serving
)
| 15,500 | 28.469582 | 92 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/gmm.py
|
import numpy as np
import tensorflow as tf
from typing import Tuple
def gmm_params(z: tf.Tensor,
gamma: tf.Tensor) \
-> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""
Compute parameters of Gaussian Mixture Model.
Parameters
----------
z
Observations.
gamma
Mixture probabilities to derive mixture distribution weights from.
Returns
-------
phi
Mixture component distribution weights.
mu
Mixture means.
cov
Mixture covariance.
L
Cholesky decomposition of `cov`.
log_det_cov
Log of the determinant of `cov`.
"""
# compute gmm parameters phi, mu and cov
N = gamma.shape[0] # nb of samples in batch
sum_gamma = tf.reduce_sum(gamma, 0) # K
phi = sum_gamma / N # K
mu = (tf.reduce_sum(tf.expand_dims(gamma, -1) * tf.expand_dims(z, 1), 0)
/ tf.expand_dims(sum_gamma, -1)) # K x D (D = latent_dim)
z_mu = tf.expand_dims(z, 1) - tf.expand_dims(mu, 0) # N x K x D
z_mu_outer = tf.expand_dims(z_mu, -1) * tf.expand_dims(z_mu, -2) # N x K x D x D
cov = (tf.reduce_sum(tf.expand_dims(tf.expand_dims(gamma, -1), -1) * z_mu_outer, 0)
/ tf.expand_dims(tf.expand_dims(sum_gamma, -1), -1)) # K x D x D
# cholesky decomposition of covariance and determinant derivation
D = tf.shape(cov)[1]
eps = 1e-6
L = tf.linalg.cholesky(cov + tf.eye(D) * eps) # K x D x D
log_det_cov = 2. * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(L)), 1) # K
return phi, mu, cov, L, log_det_cov
def gmm_energy(z: tf.Tensor,
phi: tf.Tensor,
mu: tf.Tensor,
cov: tf.Tensor,
L: tf.Tensor,
log_det_cov: tf.Tensor,
return_mean: bool = True) \
-> Tuple[tf.Tensor, tf.Tensor]:
"""
Compute sample energy from Gaussian Mixture Model.
Parameters
----------
z
Observations.
phi
Mixture component distribution weights.
mu
Mixture means.
cov
Mixture covariance.
L
Cholesky decomposition of `cov`.
log_det_cov
Log of the determinant of `cov`.
return_mean
Take mean across all sample energies in a batch.
Returns
-------
sample_energy
The sample energy of the GMM.
cov_diag
The inverse sum of the diagonal components of the covariance matrix.
"""
D = tf.shape(cov)[1]
z_mu = tf.expand_dims(z, 1) - tf.expand_dims(mu, 0) # N x K x D
z_mu_T = tf.transpose(z_mu, perm=[1, 2, 0]) # K x D x N
    v = tf.linalg.triangular_solve(L, z_mu_T, lower=True)  # K x D x N
# rewrite sample energy in logsumexp format for numerical stability
logits = tf.math.log(tf.expand_dims(phi, -1)) - .5 * (
tf.reduce_sum(tf.square(v), 1)
+ tf.cast(D, tf.float32) * tf.math.log(2. * np.pi)
+ tf.expand_dims(log_det_cov, -1)) # K x N
sample_energy = - tf.reduce_logsumexp(logits, axis=0) # N
if return_mean:
sample_energy = tf.reduce_mean(sample_energy)
# inverse sum of variances
cov_diag = tf.reduce_sum(tf.divide(1, tf.linalg.diag_part(cov)))
return sample_energy, cov_diag
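# --- Editor's usage sketch (not part of the original file) -------------------
# Derive GMM parameters from soft assignments and score the same observations.
# Shapes (N=32 samples, D=4 dims, K=3 components) are illustrative assumptions.
def _gmm_example():
    z = tf.random.normal([32, 4])
    gamma = tf.nn.softmax(tf.random.normal([32, 3]), axis=-1)
    phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
    sample_energy, cov_diag = gmm_energy(z, phi, mu, cov, L, log_det_cov, return_mean=True)
    return sample_energy, cov_diag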
| 3,268 | 29.839623 | 87 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/autoencoder.py
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Bidirectional, Concatenate, Dense, Flatten, Layer, LSTM
from typing import Callable, List, Tuple
from alibi_detect.utils.tensorflow.distance import relative_euclidean_distance
class Sampling(Layer):
""" Reparametrization trick. Uses (z_mean, z_log_var) to sample the latent vector z. """
def call(self, inputs: Tuple[tf.Tensor, tf.Tensor]) -> tf.Tensor:
"""
Sample z.
Parameters
----------
inputs
Tuple with mean and log variance.
Returns
-------
Sampled vector z.
"""
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class EncoderVAE(Layer):
def __init__(self,
encoder_net: tf.keras.Model,
latent_dim: int,
name: str = 'encoder_vae') -> None:
"""
Encoder of VAE.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
latent_dim
Dimensionality of the latent space.
name
Name of encoder.
"""
super(EncoderVAE, self).__init__(name=name)
self.encoder_net = encoder_net
self.fc_mean = Dense(latent_dim, activation=None)
self.fc_log_var = Dense(latent_dim, activation=None)
self.sampling = Sampling()
def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
x = self.encoder_net(x)
if len(x.shape) > 2:
x = Flatten()(x)
z_mean = self.fc_mean(x)
z_log_var = self.fc_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(Layer):
def __init__(self,
decoder_net: tf.keras.Model,
name: str = 'decoder') -> None:
"""
Decoder of (V)AE.
Parameters
----------
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class.
name
Name of decoder.
"""
super(Decoder, self).__init__(name=name)
self.decoder_net = decoder_net
def call(self, x: tf.Tensor) -> tf.Tensor:
return self.decoder_net(x)
class VAE(tf.keras.Model):
def __init__(self,
encoder_net: tf.keras.Model,
decoder_net: tf.keras.Model,
latent_dim: int,
beta: float = 1.,
name: str = 'vae') -> None:
"""
Combine encoder and decoder in VAE.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class.
latent_dim
Dimensionality of the latent space.
beta
Beta parameter for KL-divergence loss term.
name
Name of VAE model.
"""
super(VAE, self).__init__(name=name)
self.encoder = EncoderVAE(encoder_net, latent_dim)
self.decoder = Decoder(decoder_net)
self.beta = beta
self.latent_dim = latent_dim
def call(self, x: tf.Tensor) -> tf.Tensor:
z_mean, z_log_var, z = self.encoder(x)
x_recon = self.decoder(z)
# add KL divergence loss term
kl_loss = -.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
self.add_loss(self.beta * kl_loss)
return x_recon
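# --- Illustrative sketch (added for exposition; not part of the original
# module): wiring a small dense VAE for 32-dimensional inputs. Layer sizes
# are arbitrary example choices, not values prescribed by the library.
def _example_vae() -> tf.keras.Model:
    latent_dim = 4
    encoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(32,)),
        tf.keras.layers.Dense(16, activation=tf.nn.relu)
    ])
    decoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
        tf.keras.layers.Dense(16, activation=tf.nn.relu),
        tf.keras.layers.Dense(32, activation=None)
    ])
    vae = VAE(encoder_net, decoder_net, latent_dim, beta=1.)
    x = tf.random.normal((8, 32))
    x_recon = vae(x)  # the forward pass also registers the KL term in vae.losses
    assert x_recon.shape == (8, 32)
    return vae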
class EncoderAE(Layer):
def __init__(self,
encoder_net: tf.keras.Model,
name: str = 'encoder_ae') -> None:
"""
Encoder of AE.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
name
Name of encoder.
"""
super(EncoderAE, self).__init__(name=name)
self.encoder_net = encoder_net
def call(self, x: tf.Tensor) -> tf.Tensor:
return self.encoder_net(x)
class AE(tf.keras.Model):
def __init__(self,
encoder_net: tf.keras.Model,
decoder_net: tf.keras.Model,
name: str = 'ae') -> None:
"""
Combine encoder and decoder in AE.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class.
name
Name of autoencoder model.
"""
super(AE, self).__init__(name=name)
self.encoder = EncoderAE(encoder_net)
self.decoder = Decoder(decoder_net)
def call(self, x: tf.Tensor) -> tf.Tensor:
z = self.encoder(x)
x_recon = self.decoder(z)
return x_recon
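# --- Brief sketch (added for exposition; not part of the original module):
# the plain autoencoder is wired like the VAE above, but without the sampling
# step or KL term, so the encoder net itself must output the latent encoding.
def _example_ae() -> AE:
    encoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(32,)),
        tf.keras.layers.Dense(4, activation=None)    # encoder output = latent encoding
    ])
    decoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(4,)),
        tf.keras.layers.Dense(32, activation=None)
    ])
    return AE(encoder_net, decoder_net)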
class EncoderLSTM(Layer):
def __init__(self,
latent_dim: int,
name: str = 'encoder_lstm') -> None:
"""
Bidirectional LSTM encoder.
Parameters
----------
latent_dim
Latent dimension. Must be an even number given the bidirectional encoder.
name
Name of encoder.
"""
super(EncoderLSTM, self).__init__(name=name)
self.encoder_net = Bidirectional(LSTM(latent_dim // 2, return_state=True, return_sequences=True))
def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, List[tf.Tensor]]:
enc_out, fwd_h, fwd_c, bwd_h, bwd_c = self.encoder_net(x)
h = Concatenate()([fwd_h, bwd_h])
c = Concatenate()([fwd_c, bwd_c])
return enc_out, [h, c]
class DecoderLSTM(Layer):
def __init__(self,
latent_dim: int,
output_dim: int,
output_activation: str = None,
name: str = 'decoder_lstm') -> None:
"""
LSTM decoder.
Parameters
----------
latent_dim
Latent dimension.
output_dim
Decoder output dimension.
output_activation
Activation used in the Dense output layer.
name
Name of decoder.
"""
super(DecoderLSTM, self).__init__(name=name)
self.decoder_net = LSTM(latent_dim, return_state=True, return_sequences=True)
self.dense = Dense(output_dim, activation=output_activation)
def call(self, x: tf.Tensor, init_state: List[tf.Tensor]) -> Tuple[tf.Tensor, tf.Tensor, List[tf.Tensor]]:
x, h, c = self.decoder_net(x, initial_state=init_state)
dec_out = self.dense(x)
return dec_out, x, [h, c]
class Seq2Seq(tf.keras.Model):
def __init__(self,
encoder_net: EncoderLSTM,
decoder_net: DecoderLSTM,
threshold_net: tf.keras.Model,
n_features: int,
score_fn: Callable = tf.math.squared_difference,
beta: float = 1.,
name: str = 'seq2seq') -> None:
"""
Sequence-to-sequence model.
Parameters
----------
encoder_net
Encoder network.
decoder_net
Decoder network.
threshold_net
Regression network used to estimate threshold.
n_features
Number of features.
score_fn
Function used for outlier score.
beta
Weight on the threshold estimation loss term.
name
Name of the seq2seq model.
"""
super(Seq2Seq, self).__init__(name=name)
self.encoder = encoder_net
self.decoder = decoder_net
self.threshold_net = threshold_net
self.threshold_est = Dense(n_features, activation=None)
self.score_fn = score_fn
self.beta = beta
def call(self, x: tf.Tensor) -> tf.Tensor:
""" Forward pass used for teacher-forcing training. """
# reconstruct input via encoder-decoder
init_state = self.encoder(x)[1]
x_recon, z, _ = self.decoder(x, init_state=init_state)
# compute outlier score
err_recon = self.score_fn(x, x_recon)
# estimate outlier threshold from hidden state of decoder
z = self.threshold_net(z)
threshold_est = self.threshold_est(z)
# add threshold estimate loss
threshold_loss = tf.reduce_mean((err_recon - threshold_est) ** 2)
self.add_loss(self.beta * threshold_loss)
return x_recon
def decode_seq(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" Sequence decoding and threshold estimation used for inference. """
seq_len = np.shape(x)[1]
n_batch = x.shape[0]
# use encoder to get state vectors
init_state = self.encoder(x)[1]
# generate start of target sequence
decoder_input = np.reshape(x[:, 0, :], (n_batch, 1, -1))
# initialize hidden states used to compute outlier thresholds
z = np.zeros((n_batch, seq_len, init_state[0].numpy().shape[1])).astype(np.float32)
# sequential prediction of time series
decoded_seq = np.zeros_like(x)
decoded_seq[:, 0, :] = x[:, 0, :]
i = 1
while i < seq_len:
# decode step in sequence
decoder_output = self.decoder(decoder_input, init_state=init_state)
decoded_seq[:, i:i+1, :] = decoder_output[0].numpy()
init_state = decoder_output[2]
# update hidden state decoder used for outlier threshold
z[:, i:i+1, :] = decoder_output[1].numpy()
# update next decoder input
decoder_input = np.zeros_like(decoder_input)
decoder_input[:, :1, :] = decoder_output[0].numpy()
i += 1
# compute outlier thresholds
z = self.threshold_net(z)
threshold_est = self.threshold_est(z).numpy()
return decoded_seq, threshold_est
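# --- Illustrative sketch (added for exposition; not part of the original
# module): assembling a small seq2seq outlier model for sequences of length 10
# with 3 features. Latent and threshold-net sizes are arbitrary example values.
def _example_seq2seq() -> Seq2Seq:
    seq_len, n_features, latent_dim = 10, 3, 8       # latent_dim must be even
    threshold_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(seq_len, latent_dim)),
        tf.keras.layers.Dense(16, activation=tf.nn.relu)
    ])
    model = Seq2Seq(EncoderLSTM(latent_dim), DecoderLSTM(latent_dim, n_features),
                    threshold_net, n_features)
    x = np.random.rand(4, seq_len, n_features).astype(np.float32)
    x_recon = model(x)                                # teacher-forcing forward pass
    decoded_seq, threshold_est = model.decode_seq(x)  # step-by-step inference
    assert decoded_seq.shape == x.shape
    return model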
def eucl_cosim_features(x: tf.Tensor,
y: tf.Tensor,
max_eucl: float = 1e2) -> tf.Tensor:
"""
Compute features extracted from the reconstructed instance using the
relative Euclidean distance and cosine similarity between 2 tensors.
Parameters
----------
x
Tensor used in feature computation.
y
Tensor used in feature computation.
max_eucl
Maximum value to clip relative Euclidean distance by.
Returns
-------
Tensor concatenating the relative Euclidean distance and cosine similarity features.
"""
if len(x.shape) > 2 or len(y.shape) > 2:
x = Flatten()(x)
y = Flatten()(y)
rec_cos = tf.reshape(tf.keras.losses.cosine_similarity(y, x, -1), (-1, 1))
rec_euc = tf.reshape(relative_euclidean_distance(y, x, -1), (-1, 1))
# rec_euc could become very large so should be clipped
rec_euc = tf.clip_by_value(rec_euc, 0, max_eucl)
return tf.concat([rec_cos, rec_euc], -1)
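# --- Short sketch (added for exposition; not part of the original module):
# comparing a batch of instances with (noisy) reconstructions yields one
# (cosine similarity, relative Euclidean distance) pair per instance.
def _example_eucl_cosim_features() -> tf.Tensor:
    x = tf.random.normal((8, 16))
    x_recon = x + .1 * tf.random.normal((8, 16))  # stand-in for a decoder output
    return eucl_cosim_features(x, x_recon)        # shape (8, 2)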
class AEGMM(tf.keras.Model):
def __init__(self,
encoder_net: tf.keras.Model,
decoder_net: tf.keras.Model,
gmm_density_net: tf.keras.Model,
n_gmm: int,
recon_features: Callable = eucl_cosim_features,
name: str = 'aegmm') -> None:
"""
Deep Autoencoding Gaussian Mixture Model.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class.
gmm_density_net
Layers for the GMM network wrapped in a tf.keras.Sequential class.
n_gmm
Number of components in GMM.
recon_features
Function to extract features from the reconstructed instance by the decoder.
name
Name of the AEGMM model.
"""
super(AEGMM, self).__init__(name=name)
self.encoder = encoder_net
self.decoder = decoder_net
self.gmm_density = gmm_density_net
self.n_gmm = n_gmm
self.recon_features = recon_features
def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
enc = self.encoder(x)
x_recon = self.decoder(enc)
recon_features = self.recon_features(x, x_recon)
z = tf.concat([enc, recon_features], -1)
gamma = self.gmm_density(z)
return x_recon, z, gamma
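# --- Illustrative sketch (added for exposition; not part of the original
# module): a minimal AEGMM for 32-dimensional inputs with a 1-dimensional
# encoding and 2 mixture components. Layer sizes are example choices only.
def _example_aegmm() -> AEGMM:
    n_gmm, enc_dim, input_dim = 2, 1, 32
    encoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(input_dim,)),
        tf.keras.layers.Dense(8, activation=tf.nn.tanh),
        tf.keras.layers.Dense(enc_dim, activation=None)
    ])
    decoder_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(enc_dim,)),
        tf.keras.layers.Dense(8, activation=tf.nn.tanh),
        tf.keras.layers.Dense(input_dim, activation=None)
    ])
    # the GMM density net sees the encoding plus the 2 reconstruction features
    gmm_density_net = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(enc_dim + 2,)),
        tf.keras.layers.Dense(10, activation=tf.nn.tanh),
        tf.keras.layers.Dense(n_gmm, activation=tf.nn.softmax)
    ])
    model = AEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm)
    x_recon, z, gamma = model(tf.random.normal((4, input_dim)))
    return model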
class VAEGMM(tf.keras.Model):
def __init__(self,
encoder_net: tf.keras.Model,
decoder_net: tf.keras.Model,
gmm_density_net: tf.keras.Model,
n_gmm: int,
latent_dim: int,
recon_features: Callable = eucl_cosim_features,
beta: float = 1.,
name: str = 'vaegmm') -> None:
"""
Variational Autoencoding Gaussian Mixture Model.
Parameters
----------
encoder_net
Layers for the encoder wrapped in a tf.keras.Sequential class.
decoder_net
Layers for the decoder wrapped in a tf.keras.Sequential class.
gmm_density_net
Layers for the GMM network wrapped in a tf.keras.Sequential class.
n_gmm
Number of components in GMM.
latent_dim
Dimensionality of the latent space.
recon_features
Function to extract features from the reconstructed instance by the decoder.
beta
Beta parameter for KL-divergence loss term.
name
Name of the VAEGMM model.
"""
super(VAEGMM, self).__init__(name=name)
self.encoder = EncoderVAE(encoder_net, latent_dim)
self.decoder = decoder_net
self.gmm_density = gmm_density_net
self.n_gmm = n_gmm
self.latent_dim = latent_dim
self.recon_features = recon_features
self.beta = beta
def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
enc_mean, enc_log_var, enc = self.encoder(x)
x_recon = self.decoder(enc)
recon_features = self.recon_features(x, x_recon)
z = tf.concat([enc, recon_features], -1)
gamma = self.gmm_density(z)
# add KL divergence loss term
kl_loss = -.5 * tf.reduce_mean(enc_log_var - tf.square(enc_mean) - tf.exp(enc_log_var) + 1)
self.add_loss(self.beta * kl_loss)
return x_recon, z, gamma
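# --- Brief sketch (added for exposition; not part of the original module):
# VAEGMM is assembled like the AEGMM example above, with two differences:
# `latent_dim` is passed explicitly (the encoder net feeds `EncoderVAE`, which
# adds the mean/log-variance heads), and the GMM density net must accept
# latent_dim + 2 input features. `beta` weights the KL term as in the VAE.
def _example_vaegmm(encoder_net: tf.keras.Model,
                    decoder_net: tf.keras.Model,
                    gmm_density_net: tf.keras.Model,
                    n_gmm: int,
                    latent_dim: int) -> VAEGMM:
    return VAEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm, latent_dim, beta=1.)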
| 14,709 | 31.472406 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
AE, AEGMM, VAE, VAEGMM, Seq2Seq, eucl_cosim_features = import_optional(
'alibi_detect.models.tensorflow.autoencoder',
names=['AE', 'AEGMM', 'VAE', 'VAEGMM', 'Seq2Seq', 'eucl_cosim_features'])
TransformerEmbedding = import_optional(
'alibi_detect.models.tensorflow.embedding',
names=['TransformerEmbedding'])
PixelCNN = import_optional(
'alibi_detect.models.tensorflow.pixelcnn',
names=['PixelCNN'])
resnet, scale_by_instance = import_optional(
'alibi_detect.models.tensorflow.resnet',
names=['resnet', 'scale_by_instance'])
trainer = import_optional(
'alibi_detect.models.tensorflow.trainer',
names=['trainer'])
loss_aegmm, loss_adv_ae, loss_distillation, elbo, loss_vaegmm = import_optional(
'alibi_detect.models.tensorflow.losses',
names=['loss_aegmm', 'loss_adv_ae', 'loss_distillation', 'elbo', 'loss_vaegmm']
)
__all__ = [
"AE",
"AEGMM",
"Seq2Seq",
"VAE",
"VAEGMM",
"resnet",
"scale_by_instance",
"PixelCNN",
"TransformerEmbedding",
"trainer",
"eucl_cosim_features",
"elbo",
"loss_aegmm",
"loss_vaegmm",
"loss_adv_ae",
"loss_distillation"
]
| 1,242 | 24.895833 | 83 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/trainer.py
|
from functools import partial
import numpy as np
import tensorflow as tf
from typing import Callable, Tuple
def trainer(
model: tf.keras.Model,
loss_fn: tf.keras.losses,
x_train: np.ndarray,
y_train: np.ndarray = None,
dataset: tf.keras.utils.Sequence = None,
optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
loss_fn_kwargs: dict = None,
preprocess_fn: Callable = None,
epochs: int = 20,
reg_loss_fn: Callable = (lambda model: 0),
batch_size: int = 64,
buffer_size: int = 1024,
verbose: bool = True,
log_metric: Tuple[str, "tf.keras.metrics"] = None,
callbacks: tf.keras.callbacks = None
) -> None:
"""
Train TensorFlow model.
Parameters
----------
model
Model to train.
loss_fn
Loss function used for training.
x_train
Training data.
y_train
Training labels.
dataset
Training dataset which returns (x, y).
optimizer
Optimizer used for training.
loss_fn_kwargs
Kwargs for loss function.
preprocess_fn
Preprocessing function applied to each training batch.
epochs
Number of training epochs.
reg_loss_fn
Allows an additional regularisation term to be defined as reg_loss_fn(model)
batch_size
Batch size used for training.
buffer_size
Maximum number of elements that will be buffered when prefetching.
verbose
Whether to print training progress.
log_metric
Additional metrics whose progress will be displayed if verbose equals True.
callbacks
Callbacks used during training.
"""
optimizer = optimizer() if isinstance(optimizer, type) else optimizer
return_xy = False if not isinstance(dataset, tf.keras.utils.Sequence) and y_train is None else True
if not isinstance(dataset, tf.keras.utils.Sequence): # create dataset
train_data = x_train if y_train is None else (x_train, y_train)
dataset = tf.data.Dataset.from_tensor_slices(train_data)
dataset = dataset.shuffle(buffer_size=buffer_size).batch(batch_size)
n_minibatch = len(dataset)
if loss_fn_kwargs:
loss_fn = partial(loss_fn, **loss_fn_kwargs)
# iterate over epochs
for epoch in range(epochs):
if verbose:
pbar = tf.keras.utils.Progbar(n_minibatch, 1)
if hasattr(dataset, 'on_epoch_end'):
dataset.on_epoch_end()
loss_val_ma = 0.
for step, data in enumerate(dataset):
x, y = data if return_xy else (data, None)
if isinstance(preprocess_fn, Callable): # type: ignore
x = preprocess_fn(x)
with tf.GradientTape() as tape:
y_hat = model(x)
y = x if y is None else y
if isinstance(loss_fn, Callable): # type: ignore
args = [y, y_hat] if tf.is_tensor(y_hat) else [y] + list(y_hat)
loss = loss_fn(*args)
else:
loss = 0.
if model.losses: # additional model losses
loss += sum(model.losses)
loss += reg_loss_fn(model) # alternative way they might be specified
grads = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
if verbose:
loss_val = loss.numpy()
if loss_val.shape:
if loss_val.shape[0] != batch_size:
if len(loss_val.shape) == 1:
shape = (batch_size - loss_val.shape[0], )
elif len(loss_val.shape) == 2:
shape = (batch_size - loss_val.shape[0], loss_val.shape[1]) # type: ignore
add_mean = np.ones(shape) * loss_val.mean()
loss_val = np.r_[loss_val, add_mean]
loss_val_ma = loss_val_ma + (loss_val - loss_val_ma) / (step + 1)
pbar_values = [('loss_ma', loss_val_ma)]
if log_metric is not None:
log_metric[1](y, y_hat)
pbar_values.append((log_metric[0], log_metric[1].result().numpy()))
pbar.add(1, values=pbar_values)
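# --- Illustrative sketch (added for exposition; not part of the original
# module): fitting a tiny dense autoencoder on random data with `trainer`.
# Model architecture, data and epoch count are arbitrary example values.
def _example_trainer() -> None:
    model = tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_shape=(16,)),
        tf.keras.layers.Dense(8, activation=tf.nn.relu),
        tf.keras.layers.Dense(16, activation=None)
    ])
    x_train = np.random.rand(256, 16).astype(np.float32)
    # y_train is omitted, so the model is trained to reconstruct x_train
    trainer(model, tf.keras.losses.MeanSquaredError(), x_train,
            epochs=2, batch_size=32, verbose=False)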
| 4,358 | 37.919643 | 103 |
py
|