repo_name | path | copies | size | content | license
---|---|---|---|---|---|
mfjb/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays the result of using eight clusters, and
finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
Trevortds/Etymachine | Etymachine.py | 1 | 14570 | # import requests
import re
# import os
# import subprocess
import nltk
import pylab as pl
import tsvopener
import dawg
from nltk.corpus import brown
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
test_text = "four score and seven years ago, our fathers brought fourth\
on this continent a new nation."
tokenized_text = nltk.word_tokenize(test_text)
new_category_dict = tsvopener.open_tsv("stat_categorized.tsv")
category_dawg = dawg.BytesDAWG([(x, str.encode(new_category_dict[x])) for x
in new_category_dict.keys()])
def make_lexicon_pie(input_dict, title):
'''
Make a pie of the full lexicon based on the given mapping of words to
languages
:input_dict: dict of words to language sources
:title: title to put on the pie chart
'''
e = 0
f = 0
n = 0
l = 0
g = 0
o = 0
for word in input_dict.keys():
label = input_dict[word]
if label == "English":
e += 1
elif label == "French":
f += 1
elif label == "Norse":
n += 1
elif label == "Latin":
l += 1
elif label == "Greek":
g += 1
else:
o += 1
total = e + f + n + l + g + o
fracs = [o/total, n/total, g/total, l/total, f/total, e/total]
labels = 'Other', 'Norse', 'Greek', 'Latin', 'French', 'English'
pl.figure(figsize=(6, 6))
pl.axes([0.1, 0.1, 0.8, 0.8])
pl.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
pl.title(title)
pl.show()
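# Hedged usage sketch (hypothetical toy input, not from the original script):
# make_lexicon_pie({"water": "English", "beef": "French", "sky": "Norse"},
#                  "Toy lexicon by origin")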
def make_analysis_pie(sentences, title="Pie Chart", token=False, ignore_unknowns=False,
show=True):
'''
Analyzes the given text and generates a pie chart to show the proportions
of etymological origins in it.
:sentences: tagged sentences from the Brown corpus
:title: title to go on the chart
:token: whether to count token frequencies instead of word frequencies
:ignore_unknowns: whether to have a slice for unknowns in the chart
:show: whether to show the chart after completion.
:return: the proportions of each language origin in the text in the order
'Unknown', 'Other', 'Norse', 'Greek', 'Latin', 'French', 'Old English'
'''
e = f = n = g = l = o = u = 0
if token:
already_seen = []
unknowns = []
for sentence in sentences:
for word, tag in sentence:
if token:
if word in already_seen:
continue
else:
already_seen.append(word)
label = label_word(word, tag)
if label == "Unknown":
label = label_word(word.lower(), tag)
if label == "English":
e += 1
elif label == "French":
f += 1
elif label == "Norse":
n += 1
elif label == "Latin":
l += 1
elif label == "Greek":
g += 1
elif label == "Other":
o += 1
elif label == "Unknown":
unknowns.append((word, tag))
u += 1
total = u + e + f + n + l + g + o
fracs = [u/total, o/total, n/total, g/total, l/total, f/total, e/total]
labels = 'Unknown', 'Other', 'Norse', 'Greek', 'Latin', 'French', 'Old English'
colors = 'r', 'orange', 'b', 'c', 'm', 'y', 'g'
if ignore_unknowns:
total = e + f + n + l + g + o
fracs = [o/total, n/total, g/total, l/total, f/total, e/total]
labels = 'Other', 'Norse', 'Greek', 'Latin', 'French', 'English'
colors = 'orange', 'b', 'c', 'm', 'y', 'g'
pl.figure(figsize=(6, 6))
pl.axes([0.1, 0.1, 0.8, 0.8])
pl.pie(fracs, labels=labels, colors=colors, autopct='%1.1f%%',
shadow=True, startangle=90)
pl.title(title)
if show:
pl.show()
return fracs
# return [e, f, n, l, g, o, u, total]
# make_pie(new_category_dict, "Proportions of etymologies in the lexicon")
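# Hedged usage sketch for make_analysis_pie (the Brown section id "ca01" is an
# arbitrary illustration, not one of the sections analysed below):
# fracs = make_analysis_pie(brown.tagged_sents("ca01"), title="Sample", show=False)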
def reduce_brown_pos(word, brown_tag):
'''
Turns a brown part of speech into a word part of speech.
:word: the word in question
:brown_tag: the tag from the brown corpus
:return: the etymdict equivalent of the brown tag, or "skip" for
punctuation, or None for unknowns
'''
skip_tags = ['(', ')', '*', ',', '--', '.', ':', '\'\'', '``', '\'',
'.-HL', ',-HL', '(-HL', ')-HL']
if brown_tag in skip_tags:
return "skip"
elif brown_tag.startswith("B"):
return "v"
elif brown_tag.startswith("H"):
return "v"
elif brown_tag.startswith("V"):
return "v"
elif brown_tag.startswith("P"):
return "pron"
elif brown_tag.startswith("N"):
return "n"
elif brown_tag.startswith("J"):
return "adj"
elif brown_tag.startswith("W"):
return "pron"
elif brown_tag.startswith("R"):
return "adv"
elif brown_tag.startswith("U"):
return "interj"
elif brown_tag.startswith("Q"):
return "adj"
elif brown_tag.startswith("DO"):
return "v"
elif brown_tag.startswith("DT"):
return "adj"
elif brown_tag.startswith("I"):
return "prep"
elif brown_tag.startswith("CC"):
return "conj"
elif brown_tag.startswith("CS"):
return "conj"
elif brown_tag.startswith("CD"):
return "skip"
elif brown_tag.startswith("M"):
return "v"
elif brown_tag.startswith("AP"):
return "adj"
elif brown_tag.startswith("FW"):
return None
elif brown_tag.startswith("OD"):
return "adj"
elif brown_tag.startswith("EX"):
return "adv.,conj"
# elif "$" in word: late addition, not reflected in submitted charts
# return "skip"
else:
print(word, " ", brown_tag)
return None
'''
first, check if the word by itself is in the dictionary
if it's not, then check if there's only one entry with "word ("
if it's not, then take the part of speech and look
if still not, then lemmatize and look
'''
lemmatizer = nltk.stem.WordNetLemmatizer()
def label_word(word, brown_tag, lemmatized=False):
'''
return the etymological category of the word given
:word: the word in question
:brown_tag: the tag in the brown corpus
:lemmatized: whether this word has been lemmatized yet
:return: {'Unknown', 'Other', 'Norse', 'Greek', 'Latin', 'French',
'Old English'}
'''
brown_tag = re.sub("-HL", "", brown_tag)
brown_tag = re.sub("-TL", "", brown_tag)
if word in category_dawg:
return category_dawg[word][0].decode()
if word.lower() in category_dawg:
return category_dawg[word.lower()][0].decode()
if len(category_dawg.keys(word + " (")) > 0:
return category_dawg[category_dawg.keys(word)[0]][0].decode()
etymtag = reduce_brown_pos(word, brown_tag)
if etymtag is None:
return "Unknown"
if etymtag == "skip":
return "skip"
word_n_tag = word + " (" + etymtag + ".)"
word_n_tag_n_number = word + " (" + etymtag + ".1)"
if word_n_tag in category_dawg:
return category_dawg[word_n_tag][0].decode()
if word_n_tag_n_number in category_dawg:
return category_dawg[word_n_tag_n_number][0].decode()
if lemmatized:
return "Unknown"
if etymtag == "n":
wordnet_tag = "n"
elif etymtag == "v":
wordnet_tag = "v"
elif etymtag == "adj":
wordnet_tag = "a"
elif etymtag == "adv":
wordnet_tag = "v"
else:
return "Unknown"
lemma = lemmatizer.lemmatize(word, pos=wordnet_tag)
return label_word(lemma, brown_tag, lemmatized=True)
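# Hedged illustration of the fallback chain (assumes a hypothetical entry
# "run (v.)" in category_dawg): label_word("running", "VBG") finds no direct hit,
# maps the Brown tag to "v", lemmatizes to "run", and then matches via the
# "run (" prefix search, returning that entry's category.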
def big_pie_maker():
'''
Big function to make lots of pies.
Generates pies for each of six texts, with and without token frequencies,
and then shows them.
'''
sentences = brown.tagged_sents("ca09")
title = "Words in 1961 Philadelphia Inquirer political article"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in 1961 Philadelphia Inquirer political article"
make_analysis_pie(sentences, title, show=False, token=True)
sentences = brown.tagged_sents("cm01")
title = "Words in Robert A Henlein's Stranger in a Strange Land"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in Robert A Henlein's Stranger in a Strange Land"
make_analysis_pie(sentences, title, show=False, token=True)
sentences = brown.tagged_sents("cp26")
title = "Words in Evrin D Krause's The Snake"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in Evrin D Krause's The Snake"
make_analysis_pie(sentences, title, show=False, token=True)
sentences = brown.tagged_sents("cd07")
title = "Words in Peter Eversveld's Faith Amid Fear"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in Peter Eversveld's Faith Amid Fear"
make_analysis_pie(sentences, title, show=False, token=True)
sentences = brown.tagged_sents("ch09")
title = "Words in the Public Laws of the 87th Congress"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in the Public Laws of the 87th Congress"
make_analysis_pie(sentences, title, show=False, token=True)
sentences = brown.tagged_sents("cj16")
title = "Words in Nagaraj and Black: Wound-Tumor Virus Antigen"
print(title)
make_analysis_pie(sentences, title, show=False)
title = "Tokens in Nagaraj and Black: Wound-Tumor Virus Antigen"
make_analysis_pie(sentences, title, show=False, token=True)
pl.show()
# big_pie_maker()
# Take a sample of the words labeled "other"
# otherwords = []
# for words in new_category_dict.keys():
# if new_category_dict[words] == "Other":
# otherwords.append(words)
# print("number of 'other': ", len(otherwords))
# import random
# random.shuffle(otherwords)
# print(otherwords[:100])
# This code was borrowed from Stack Overflow; I'm not entirely sure how it works.
# I only added the color.
def plot_clustered_stacked(dfall, labels=None,
title="multiple stacked bar plot", H="/",
**kwargs):
"""
Given a list of dataframes, with identical columns and index,
create a clustered stacked bar plot.
labels is a list of the names of the dataframes, used for the legend.
title is a string for the title of the plot.
H is the hatch used to identify the different dataframes."""
n_df = len(dfall)
n_col = len(dfall[0].columns)
n_ind = len(dfall[0].index)
axe = plt.subplot(111)
colors = 'r', 'orange', 'b', 'c', 'm', 'y', 'g'
colors = colors[::-1]
for df in dfall : # for each data frame
axe = df.plot(kind="bar",
linewidth=0,
stacked=True,
ax=axe,
legend=False,
grid=False,
colors=colors,
**kwargs) # make bar plots
h,l = axe.get_legend_handles_labels() # get the handles we want to modify
for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
for j, pa in enumerate(h[i:i+n_col]):
for rect in pa.patches: # for each index
rect.set_x(rect.get_x() + 1 / float(n_df + 1) * i / float(n_col))
rect.set_hatch(H * int(i / n_col)) #edited part
rect.set_width(1 / float(n_df + 1))
axe.set_xticks((np.arange(0, 2 * n_ind, 2) + 1 / float(n_df + 1)) / 2.)
axe.set_xticklabels(df.index, rotation = 0)
axe.set_title(title)
# Add invisible data to add another legend
n=[]
for i in range(n_df):
n.append(axe.bar(0, 0, color="gray", hatch=H * i))
l1 = axe.legend(h[:n_col], l[:n_col], loc=[1.01, 0.5])
if labels is not None:
l2 = plt.legend(n, labels, loc=[1.01, 0.1])
axe.add_artist(l1)
return axe
# create dataframes
def big_chart_maker():
sentences = brown.tagged_sents("ca09")
word_matrix = np.matrix(make_analysis_pie(sentences, show=False)[::-1])
token_matrix = np.matrix(make_analysis_pie(sentences, show=False, token=True)[::-1])
sentences = brown.tagged_sents("cm01")
new_words = make_analysis_pie(sentences, show=False)[::-1]
new_tokens = make_analysis_pie(sentences, show=False, token=True)[::-1]
word_matrix = np.vstack((word_matrix, new_words))
token_matrix = np.vstack((token_matrix, new_tokens))
sentences = brown.tagged_sents("cp26")
new_words = make_analysis_pie(sentences, show=False)[::-1]
new_tokens = make_analysis_pie(sentences, show=False, token=True)[::-1]
word_matrix = np.vstack((word_matrix, new_words))
token_matrix = np.vstack((token_matrix, new_tokens))
sentences = brown.tagged_sents("cd07")
new_words = make_analysis_pie(sentences, show=False)[::-1]
new_tokens = make_analysis_pie(sentences, show=False, token=True)[::-1]
word_matrix = np.vstack((word_matrix, new_words))
token_matrix = np.vstack((token_matrix, new_tokens))
sentences = brown.tagged_sents("ch09")
new_words = make_analysis_pie(sentences, show=False)[::-1]
new_tokens = make_analysis_pie(sentences, show=False, token=True)[::-1]
word_matrix = np.vstack((word_matrix, new_words))
token_matrix = np.vstack((token_matrix, new_tokens))
sentences = brown.tagged_sents("cj16")
new_words = make_analysis_pie(sentences, show=False)[::-1]
new_tokens = make_analysis_pie(sentences, show=False, token=True)[::-1]
word_matrix = np.vstack((word_matrix, new_words))
token_matrix = np.vstack((token_matrix, new_tokens))
df1 = pd.DataFrame(word_matrix,
index=["News", "Sci-fi", "Romance", "Religion", "Legal", "Medical"],
columns=['Unknown', 'Other', 'Norse', 'Greek', 'Latin', 'French', 'Old English'][::-1])
df2 = pd.DataFrame(token_matrix,
index=["News", "Sci-fi", "Romance", "Religion", "Legal", "Medical"],
columns=['Unknown', 'Other', 'Norse', 'Greek', 'Latin', 'French', 'Old English'][::-1])
# Then, just call :
plot_clustered_stacked([df1, df2],["Words", "Tokens"],
title="Proportions of etymological origins in several texts")
plt.show() | gpl-2.0 |
pwalczysko/openmicroscopy | components/tools/OmeroPy/src/omero/install/jvmcfg.py | 10 | 16253 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Automatic configuration of memory settings for Java servers.
"""
from types import StringType
from shlex import split
import logging
LOGGER = logging.getLogger("omero.install.jvmcfg")
def strip_dict(map, prefix=("omero", "jvmcfg"), suffix=(), limit=1):
"""
For the given dictionary, return a copy of the
dictionary where all entries not matching the
prefix, suffix, and limit have been removed and
where all remaining keys have had the prefix and
suffix stripped. The limit describes the number
of elements that are allowed in the new key after
stripping prefix and suffix.
"""
if isinstance(prefix, StringType):
prefix = tuple(prefix.split("."))
if isinstance(suffix, StringType):
suffix = tuple(suffix.split("."))
rv = dict()
if not map:
return dict()
def __strip_dict(k, v, prefix, suffix, rv):
key = tuple(k.split("."))
ksz = len(key)
psz = len(prefix)
ssz = len(suffix)
if ksz <= (psz + ssz):
return # No way to strip if smaller
if key[0:psz] == prefix and key[ksz-ssz:] == suffix:
newkey = key[psz:ksz-ssz]
if len(newkey) == limit:
newkey = ".".join(newkey)
rv[newkey] = v
for k, v in map.items():
__strip_dict(k, v, prefix, suffix, rv)
return rv
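# Hedged usage sketch (the key and value below are illustrative only):
# strip_dict({"omero.jvmcfg.heap_size.blitz": "1g"}, suffix=("blitz",))
# -> {"heap_size": "1g"}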
class StrategyRegistry(dict):
def __init__(self, *args, **kwargs):
super(dict, self).__init__(*args, **kwargs)
STRATEGY_REGISTRY = StrategyRegistry()
class Settings(object):
"""
Container for the config options found in etc/grid/config.xml
"""
def __init__(self, server_values=None, global_values=None):
if server_values is None:
self.__server = dict()
else:
self.__server = server_values
if global_values is None:
self.__global = dict()
else:
self.__global = global_values
self.__static = {
"strategy": PercentStrategy,
"append": "",
"perm_gen": "128m",
"heap_dump": "off",
"heap_size": "512m",
"system_memory": None,
"max_system_memory": "48000",
"min_system_memory": "3414",
}
self.__manual = dict()
def __getattr__(self, key):
return self.lookup(key)
def lookup(self, key, default=None):
if key in self.__manual:
return self.__manual[key]
elif key in self.__server:
return self.__server[key]
elif key in self.__global:
return self.__global[key]
elif key in self.__static:
return self.__static[key]
else:
return default
def overwrite(self, key, value, always=False):
if self.was_set(key) and not always:
# Then we leave it as the user requested
return
else:
self.__manual[key] = value
def was_set(self, key):
return key in self.__server or key in self.__global
def get_strategy(self):
return STRATEGY_REGISTRY.get(self.strategy, self.strategy)
def __str__(self):
rv = dict()
rv.update(self.__server)
rv.update(self.__global)
if not rv:
rv = ""
return 'Settings(%s)' % rv
class Strategy(object):
"""
Strategy for calculating memory settings. Primary
class of the memory module.
"""
def __init__(self, name, settings=None):
"""
'name' argument should likely be one of:
('blitz', 'indexer', 'pixeldata', 'repository')
"""
if settings is None:
settings = Settings()
self.name = name
self.settings = settings
if type(self) == Strategy:
raise Exception("Must subclass!")
# Memory helpers
def system_memory_mb(self):
"""
Returns a tuple, in MB, of available, active, and total memory.
"total" memory is found by calling to first a Python library
(if installed) and otherwise a Java class. If
"system_memory" is set, it will short-circuit both methods.
"active" memory is set to "total" but limited by "min_system_memory"
and "max_system_memory".
"available" may not be accurate, and in some cases will be
set to total.
"""
available, total = None, None
if self.settings.system_memory is not None:
total = int(self.settings.system_memory)
available = total
else:
pymem = self._system_memory_mb_psutil()
if pymem is not None:
available, total = pymem
else:
available, total = self._system_memory_mb_java()
max_system_memory = int(self.settings.max_system_memory)
min_system_memory = int(self.settings.min_system_memory)
active = max(min(total, max_system_memory), min_system_memory)
return available, active, total
def _system_memory_mb_psutil(self):
try:
import psutil
pymem = psutil.virtual_memory()
return (pymem.free/1000000, pymem.total/1000000)
except ImportError:
LOGGER.debug("No psutil installed")
return None
def _system_memory_mb_java(self):
import omero.cli
import omero.java
# Copied from db.py. Needs better dir detection
cwd = omero.cli.CLI().dir
server_jar = cwd / "lib" / "server" / "server.jar"
cmd = ["ome.services.util.JvmSettingsCheck", "--psutil"]
p = omero.java.popen(["-cp", str(server_jar)] + cmd)
o, e = p.communicate()
if p.poll() != 0:
LOGGER.warn("Failed to invoke java:\nout:%s\nerr:%s",
o, e)
rv = dict()
for line in o.split("\n"):
line = line.strip()
if not line:
continue
parts = line.split(":")
if len(parts) == 1:
parts.append("")
rv[parts[0]] = parts[1]
try:
free = long(rv["Free"]) / 1000000
except:
LOGGER.warn("Failed to parse Free from %s", rv)
free = 2000
try:
total = long(rv["Total"]) / 1000000
except:
LOGGER.warn("Failed to parse Total from %s", rv)
total = 4000
return (free, total)
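# Hedged note: the parser above expects "Key:Value" lines from JvmSettingsCheck;
# e.g. a line reading "Free:8000000000" (illustrative) would yield free = 8000 MB.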
# API Getters
def get_heap_size(self, sz=None):
if sz is None or self.settings.was_set("heap_size"):
sz = self.settings.heap_size
if str(sz).startswith("-X"):
return sz
else:
rv = "-Xmx%s" % sz
if rv[-1].lower() not in ("b", "k", "m", "g"):
rv = "%sm" % rv
return rv
def get_heap_dump(self):
hd = self.settings.heap_dump
if hd == "off":
return ""
elif hd in ("on", "cwd", "tmp"):
return "-XX:+HeapDumpOnOutOfMemoryError"
def get_perm_gen(self):
pg = self.settings.perm_gen
if str(pg).startswith("-XX"):
return pg
else:
return "-XX:MaxPermSize=%s" % pg
def get_append(self):
values = []
if self.settings.heap_dump == "tmp":
import tempfile
tmp = tempfile.gettempdir()
values.append("-XX:HeapDumpPath=%s" % tmp)
return values + split(self.settings.append)
def get_memory_settings(self):
values = [
self.get_heap_size(),
self.get_heap_dump(),
self.get_perm_gen(),
]
if any([x.startswith("-XX:MaxPermSize") for x in values]):
values.append("-XX:+IgnoreUnrecognizedVMOptions")
values += self.get_append()
return [x for x in values if x]
class ManualStrategy(Strategy):
"""
Simplest strategy which assumes all values have
been set and simply uses them or their defaults.
"""
class PercentStrategy(Strategy):
"""
Strategy based on a percent of available memory.
"""
PERCENT_DEFAULTS = (
("blitz", 15),
("pixeldata", 15),
("indexer", 10),
("repository", 10),
("other", 1),
)
def __init__(self, name, settings=None):
super(PercentStrategy, self).__init__(name, settings)
self.defaults = dict(self.PERCENT_DEFAULTS)
self.use_active = True
def get_heap_size(self):
"""
Uses the results of the default settings of
calculate_heap_size() as an argument to
get_heap_size(), in other words some percent
of the active memory.
"""
sz = self.calculate_heap_size()
return super(PercentStrategy, self).get_heap_size(sz)
def get_percent(self):
other = self.defaults.get("other", "1")
default = self.defaults.get(self.name, other)
percent = int(self.settings.lookup("percent", default))
return percent
def get_perm_gen(self):
available, active, total = self.system_memory_mb()
choice = self.use_active and active or total
if choice <= 4000:
if choice >= 2000:
self.settings.overwrite("perm_gen", "256m")
elif choice <= 8000:
self.settings.overwrite("perm_gen", "512m")
else:
self.settings.overwrite("perm_gen", "1g")
return super(PercentStrategy, self).get_perm_gen()
def calculate_heap_size(self, method=None):
"""
Re-calculates the appropriate heap size based on the
value of get_percent(). The "active" memory returned
by method() will be used by default, but can be modified
to use "total" via the "use_active" flag.
"""
if method is None:
method = self.system_memory_mb
available, active, total = method()
choice = self.use_active and active or total
percent = self.get_percent()
calculated = choice * int(percent) / 100
return calculated
def usage_table(self, min=10, max=20):
total_mb = [2**x for x in range(min, max)]
for total in total_mb:
method = lambda: (total, total, total)
yield total, self.calculate_heap_size(method)
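# Hedged sketch of the percent math (values are illustrative): with
# system_memory=8000 and the default 15% for "blitz", calculate_heap_size()
# gives 8000 * 15 / 100 = 1200, so get_heap_size() returns "-Xmx1200m".
# s = PercentStrategy("blitz", Settings({"system_memory": "8000"}))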
STRATEGY_REGISTRY["manual"] = ManualStrategy
STRATEGY_REGISTRY["percent"] = PercentStrategy
def read_settings(template_xml):
"""
Read the memory settings from the template file
"""
rv = dict()
for template in template_xml.findall("server-template"):
for server in template.findall("server"):
for option in server.findall("option"):
o = option.text
if o.startswith("-Xmx") | o.startswith("-XX"):
rv.setdefault(server.get('id'), []).append(o)
return rv
def adjust_settings(config, template_xml,
blitz=None, indexer=None,
pixeldata=None, repository=None):
"""
Takes an omero.config.ConfigXml object and adjusts
the memory settings. Primary entry point to the
memory module.
"""
from xml.etree.ElementTree import Element
from collections import defaultdict
replacements = dict()
options = dict()
for template in template_xml.findall("server-template"):
for server in template.findall("server"):
for option in server.findall("option"):
o = option.text
if o.startswith("MEMORY:"):
options[o[7:]] = (server, option)
for props in server.findall("properties"):
for prop in props.findall("property"):
name = prop.attrib.get("name", "")
if name.startswith("REPLACEMENT:"):
replacements[name[12:]] = (server, prop)
rv = defaultdict(list)
m = config.as_map()
loop = (("blitz", blitz), ("indexer", indexer),
("pixeldata", pixeldata), ("repository", repository))
for name, StrategyType in loop:
if name not in options:
raise Exception(
"Cannot find %s option. Make sure templates.xml was "
"not copied from an older server" % name)
for name, StrategyType in loop:
specific = strip_dict(m, suffix=name)
defaults = strip_dict(m)
settings = Settings(specific, defaults)
rv[name].append(settings)
if StrategyType is None:
StrategyType = settings.get_strategy()
if not callable(StrategyType):
raise Exception("Bad strategy: %s" % StrategyType)
strategy = StrategyType(name, settings)
settings = strategy.get_memory_settings()
server, option = options[name]
idx = 0
for v in settings:
rv[name].append(v)
if idx == 0:
option.text = v
else:
elem = Element("option")
elem.text = v
server.insert(idx, elem)
idx += 1
# Now we check for any other properties and
# put them where the replacement should go.
for k, v in m.items():
r = []
suffix = ".%s" % name
size = len(suffix)
if k.endswith(suffix):
k = k[:-size]
r.append((k, v))
server, replacement = replacements[name]
idx = 0
for k, v in r:
if idx == 0:
replacement.attrib["name"] = k
replacement.attrib["value"] = v
else:
elem = Element("property", name=k, value=v)
server.append(elem)
return rv
def usage_charts(path,
min=0, max=20,
Strategy=PercentStrategy, name="blitz"):
# See http://matplotlib.org/examples/pylab_examples/anscombe.html
from pylab import array
from pylab import axis
from pylab import gca
from pylab import subplot
from pylab import plot
from pylab import setp
from pylab import savefig
from pylab import text
points = 200
x = array([2 ** (x / points) / 1000
for x in range(min*points, max*points)])
y_configs = (
(Settings({}), 'A'),
(Settings({"percent": "20"}), 'B'),
(Settings({}), 'C'),
(Settings({"max_system_memory": "10000"}), 'D'),
)
def f(cfg):
s = Strategy(name, settings=cfg[0])
y = []
for total in x:
method = lambda: (total, total, total)
y.append(s.calculate_heap_size(method))
return y
y1 = f(y_configs[0])
y2 = f(y_configs[1])
y3 = f(y_configs[2])
y4 = f(y_configs[3])
axis_values = [0, 20, 0, 6]
def ticks_f():
setp(gca(), xticks=(8, 16), yticks=(2, 4))
def text_f(which):
cfg = y_configs[which]
# s = cfg[0]
txt = "%s" % (cfg[1],)
text(2, 2, txt, fontsize=20)
subplot(221)
plot(x, y1)
axis(axis_values)
text_f(0)
ticks_f()
subplot(222)
plot(x, y2)
axis(axis_values)
text_f(1)
ticks_f()
subplot(223)
plot(x, y3)
axis(axis_values)
text_f(2)
ticks_f()
subplot(224)
plot(x, y4)
axis(axis_values)
text_f(3)
ticks_f()
savefig(path)
| gpl-2.0 |
Walter1218/rpn_detector_tf | batch_generate.py | 1 | 15624 | import pandas as pd
import numpy as np
import cv2, utils
import random
import copy
import threading
import itertools
import numpy.random as npr
class Box():
def __init__(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
def get_img_by_name(df,ind,size=(960,640), dataset = 'PASCAL_VOC'):
file_name = df['FileName'][ind]
#print(file_name)
img = cv2.imread(file_name)
img_size = np.shape(img)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = cv2.resize(img,size)
if(dataset == 'PASCAL_VOC'):
name_str = file_name.split('/')
name_str = name_str[-1]
else:
name_str = file_name.split('/')
#print(name_str)
name = name_str[2] + '/' + name_str[3] + '/' + name_str[4] + '/' + name_str[5] + '/'
#print(name)
name_str = name + name_str[-1]
#print(name_str)
#print(file_name)
bb_boxes = df[df['Frame'] == name_str].reset_index()
img_size_post = np.shape(img)
# TODO: add data augmentation support
return name_str,img,bb_boxes
def target_label_generate(gta, anchor_box ,mc ,is_multi_layer = False, DEBUG = False):
"""
target label generation function.
input:
gta: ground truth
anchor_box: anchor box, default is anchor_box[0]
is_multi_layer: multi-feature-map detection will be supported later
return:
target label (fg/bg) array
target bbox regression arrays (target bbox delta, bbox_in_weight, bbox_out_weight)
"""
anchor_box = anchor_box[0]
#default anchor_box[0] is H:40, W:60, because the rf is 16 in vgg(conv5_3)
H, W = (40, 60)
num_anchor_box_per_grid = mc.ANCHOR_PER_GRID
gta = bbox_transform(gta, is_df = True)
gta = bbox2cxcy(gta)
#target_label = np.zeros((H, W, num_anchor_box_per_grid), dtype=np.float32)
#target_bbox_delta = np.zeros((H, W, num_anchor_box_per_grid, 4), dtype=np.float32)
#bbox_in_w = np.zeros((H, W, num_anchor_box_per_grid, 4), dtype=np.float32)
#bbox_out_w = np.zeros((H, W, num_anchor_box_per_grid, 4), dtype=np.float32)
#is valid, only inside anchor box is valid
img_size = (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT)
#transfer center_x, center_y, w, h to xmin, ymin, xmax, ymax
bbox_xy = bbox_transform(anchor_box, is_df = False)
_allowed_border = 0
inds_inside = np.where(
(bbox_xy[:, 0] >= -_allowed_border) &
(bbox_xy[:, 1] >= -_allowed_border) &
(bbox_xy[:, 2] < img_size[0] + _allowed_border) & # width
(bbox_xy[:, 3] < img_size[1] + _allowed_border) # height
)[0]
out_inside = np.where(
(bbox_xy[:, 0] < -_allowed_border) &
(bbox_xy[:, 1] < -_allowed_border) &
(bbox_xy[:, 2] >= img_size[0] + _allowed_border) & # width
(bbox_xy[:, 3] >= img_size[1] + _allowed_border) # height
)[0]
#if(DEBUG):
# print('the valid anchors have ',len(inds_inside))
#valid_anchors
valid_anchors = anchor_box[inds_inside]
anchors = coord2box(valid_anchors)
groundtruth = coord2box(gta)
#print(anchors)
num_of_anchors = len(anchors)
num_of_gta = len(groundtruth)
overlaps_table = np.zeros((num_of_anchors, num_of_gta))
for i in range(num_of_anchors):
for j in range(num_of_gta):
overlaps_table[i,j] = utils.box_iou(anchors[i], groundtruth[j])
#overlaps_table = utils.bbox_overlaps(anchors, groundtruth)
if(DEBUG):
print('the shape of overlaps table {0}'.format(overlaps_table.shape))
print('the number of groundtruth is', len(groundtruth))
print('the number of valid anchor box is', len(anchors))
# pick the positive and negative samples referenced from the overlaps table
#argmax overlaps for each groundtruth
gt_argmax_overlaps = overlaps_table.argmax(axis=0)
if(DEBUG):
print('the shape of gt_argmax_overlaps is ',gt_argmax_overlaps.shape)
print('the value in gt_argmax_overlaps is ',gt_argmax_overlaps)
argmax_overlaps = overlaps_table.argmax(axis = 1)
if(DEBUG):
print('the shape of argmax_overlaps is ', argmax_overlaps.shape)
print('the value in argmax_overlaps is ', argmax_overlaps)
#overlaps groundtruth
gt_max_overlaps = overlaps_table[gt_argmax_overlaps,np.arange(overlaps_table.shape[1])]
gt_argmax_overlaps = np.where(overlaps_table == gt_max_overlaps)[0]
if(DEBUG):
print('the shape of processed gt_argmax_overlaps is ', gt_argmax_overlaps.shape)
print('the value in processed gt_argmax_overlaps is ', gt_argmax_overlaps)
#used this to select postive/ negative/ no care samples
max_overlaps = overlaps_table[np.arange(len(valid_anchors)), argmax_overlaps]
if(DEBUG):
print('the shape of max overlaps table is ', max_overlaps.shape)
target_labels = pick_samples(max_overlaps, gt_argmax_overlaps, mc)
#target_labels[out_inside] = -1
if(DEBUG):
num_pos_samples = len(np.where(target_labels == 1)[0])
num_neg_samples = len(np.where(target_labels == 0)[0])
print('the number of positive samples is ', num_pos_samples)
print('the number of negative samples is ', num_neg_samples)
#subsampling, default subsampling methods is random sample
target_labels = subsampling(target_labels, mc)
if(DEBUG):
num_pos_samples = len(np.where(target_labels == 1)[0])
num_neg_samples = len(np.where(target_labels == 0)[0])
print('After subsampling, the number of positive samples is ', num_pos_samples)
print('After subsampling, the number of negative samples is ', num_neg_samples)
#target cls label
#if(mc.cls = True):
#bbox delta label
target_delta, bbox_in_w, bbox_out_w = target_bbox(out_inside, valid_anchors, gta[argmax_overlaps,:], target_labels, mc)
if(DEBUG):
print('the shape of target_delta is ',target_delta.shape)
print('the shape of bbox_in_w is ',bbox_in_w.shape)
print('the shape of bbox_out_w is ',bbox_out_w.shape)
#UNMAP TO original feature images
total_anchors = num_anchor_box_per_grid * H * W
labels = unmap2original(target_labels, total_anchors, inds_inside, fill=-1)
bbox_targets = unmap2original(target_delta, total_anchors, inds_inside, fill=0)
bbox_inside_weights = unmap2original(bbox_in_w, total_anchors, inds_inside, fill=0)
bbox_outside_weights = unmap2original(bbox_out_w, total_anchors, inds_inside, fill=0)
#labels = target_labels
#bbox_targets = target_delta
#bbox_inside_weights = bbox_in_w
#bbox_outside_weights = bbox_out_w
#bbox_targets[out_inside] = 0
#bbox_inside_weights[out_inside] = 0
#bbox_outside_weights[out_inside] = 0
if(DEBUG):
print('the shape of target labels is ', labels.shape)
print('the shape of bbox_target is', bbox_targets.shape)
print('the shape of bbox_in_w is', bbox_inside_weights.shape)
print('the shape of bbox_out_w is', bbox_outside_weights.shape)
#reshape
#labels = labels.reshape((H, W, num_anchor_box_per_grid))
#bbox_targets = bbox_targets.reshape((H, W, num_anchor_box_per_grid * 4))
#bbox_inside_weights = bbox_inside_weights.reshape((H, W, num_anchor_box_per_grid * 4))
#bbox_outside_weights = bbox_outside_weights.reshape((H, W, num_anchor_box_per_grid * 4))
labels = labels.reshape((mc.H , mc.W , mc.ANCHOR_PER_GRID))
rpn_labels = labels
#print(rpn_labels.shape)
# bbox_targets
bbox_targets = bbox_targets \
.reshape((mc.H , mc.W , mc.ANCHOR_PER_GRID * 4))
rpn_bbox_targets = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((mc.H , mc.W , mc.ANCHOR_PER_GRID * 4))
#assert bbox_inside_weights.shape[2] == height
#assert bbox_inside_weights.shape[3] == width
rpn_bbox_inside_weights = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((mc.H , mc.W , mc.ANCHOR_PER_GRID * 4))
#assert bbox_outside_weights.shape[2] == height
#assert bbox_outside_weights.shape[3] == width
rpn_bbox_outside_weights = bbox_outside_weights
if(DEBUG):
print('overlaps_table', overlaps_table.shape)
print('overlaps_table displayed', overlaps_table)
num_gta = overlaps_table.shape[1]
print('number of gta',num_gta)
#if(mc.cls):
# num_gta = overlaps_table.shape[1]
# cls_map = cls_mapping(valid_anchors, overlaps_table, target_labels, gta, num_gta, mc)
# cls_map = unmap2original(cls_map, total_anchors, inds_inside, fill=-1)
# cls_map = cls_map.reshape((mc.H, mc.W, mc.ANCHOR_PER_GRID))
#print(cls_map.shape)
# return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, cls_map
#else:
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, gta
def cls_mapping(valid_anchors, overlaps_table, target_labels, gta, num_gta, mc):
dim = overlaps_table.shape[0]
target_cls_label = np.zeros((dim, ), dtype = np.float32)
#for i in range(num_gta):
# print('the orientations of index i is',gta[i,4])
#print('this gta, the overlaps looks as',overlaps_table[:,i])
postive_index = np.where(target_labels == 1 )[0]
anchor_box = valid_anchors
#print('the coordinate of postive samples anchor box is',anchor_box[postive_index])
ref_anchors = anchor_box[postive_index]
cls_overlaps_table = np.zeros((len(postive_index), num_gta))
anchors = coord2box(ref_anchors)
groundtruth = coord2box(gta)
for i in range(len(postive_index)):
for j in range(num_gta):
cls_overlaps_table[i,j] = utils.box_iou(anchors[i], groundtruth[j])
#print(cls_overlaps_table)
#print(cls_overlaps_table.argmax(axis = 1))
cls_idx = cls_overlaps_table.argmax(axis = 1)
#print(gta[cls_idx,4],postive_index[cls_idx])
for i in range(num_gta):
pos_idx_gta = np.where(cls_idx == i)[0]
#print('the groundtruth index {0} , postive index is {1}'.format(i, postive_index[pos_idx_gta]))
if(int(gta[i,4]) == 1):
target_cls_label[postive_index[pos_idx_gta]] = 1
else:
target_cls_label[postive_index[pos_idx_gta]] = 0
#print(np.where(target_cls_label[:,int(gta[i,4] + 1)] == 1)[0])
#for i in range(len(cls_idx)):
#print('the psotive samples', np.where(target_labels > 0)[0])
#for i in range(len(gta)):
#print(gta[i,4])
# target_cls_label[]
return target_cls_label
def unmap2original(data, count, inds, fill=0):
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def target_bbox(out_inside, anchor_box, gt_boxes, target_labels, mc):
#create target bbox delta here
target_delta = np.zeros((len(anchor_box), 4), dtype = np.float32)
target_delta = utils.bbox_delta_convert(anchor_box, gt_boxes)
target_delta[out_inside] = 0
bbox_in_w = np.zeros((len(anchor_box), 4), dtype = np.float32)
bbox_out_w = np.zeros((len(anchor_box), 4), dtype = np.float32)
RPN_BBOX_INSIDE_WEIGHTS = mc.RPN_BBOX_INSIDE_WEIGHTS
RPN_POSITIVE_WEIGHT = mc.RPN_POSITIVE_WEIGHT
bbox_in_w[target_labels == 1] = np.array(RPN_BBOX_INSIDE_WEIGHTS)
if RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(target_labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((RPN_POSITIVE_WEIGHT > 0) & (RPN_POSITIVE_WEIGHT < 1))
positive_weights = (RPN_POSITIVE_WEIGHT / np.sum(target_labels == 1))
negative_weights = ((1.0 - RPN_POSITIVE_WEIGHT) / np.sum(target_labels == 0))
bbox_out_w[target_labels == 1] = positive_weights
bbox_out_w[target_labels == 0] = negative_weights
return target_delta, bbox_in_w, bbox_out_w
def pick_samples(max_overlaps, gt_argmax_overlaps, mc):
negative_threshould = mc.neg_max_overlaps
postive_threshould = mc.pos_min_overlaps
#initialize target labels here
# like the original Faster R-CNN model, we set positive samples as 1, negative samples as 0, and -1 for don't-care
target_labels = np.empty((len(max_overlaps), ), dtype=np.int32)
#all target labels will fill -1 first
target_labels.fill(-1)
#negative samples, < negative_threshould
target_labels[max_overlaps < negative_threshould] = 0
# all gt argmax: the maximum overlap of each groundtruth is set as a positive sample
target_labels[gt_argmax_overlaps] = 1
#postive samples, >= postive_threshould
target_labels[max_overlaps >= postive_threshould] = 1
return target_labels
def subsampling(target_labels, mc, sampling_methods = 'random'):
"""
Random Sampling, Bootstrap and Mixture methods;
currently only random sampling is supported.
"""
fraction = mc.RPN_FRACTION
batch_size = mc.RPN_BATCH_SIZE
bal_num_of_pos = int(fraction * batch_size)
fg = np.where(target_labels == 1)[0]
if(len(fg) > bal_num_of_pos):
# subsampling the positive samples
disable_inds = npr.choice(fg, size=(len(fg) - bal_num_of_pos), replace=False)
target_labels[disable_inds] = -1
bal_num_of_neg = batch_size - np.sum(target_labels == 1)
bg = np.where(target_labels == 0)[0]
if(len(bg) > bal_num_of_neg):
#subsampling the negative samples
disable_inds = npr.choice(bg, size=(len(bg) - bal_num_of_neg), replace=False)
target_labels[disable_inds] = -1
return target_labels
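# Hedged arithmetic note (RPN_BATCH_SIZE and RPN_FRACTION come from mc; the values
# below are assumptions): with RPN_BATCH_SIZE=256 and RPN_FRACTION=0.5, at most
# int(0.5 * 256) = 128 positives are kept and negatives fill the remainder.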
def coord2box(bbox):
boxes = []
for i in range(len(bbox)):
x = bbox[i,0]
y = bbox[i,1]
w = bbox[i,2]
h = bbox[i,3]
boxes.append(Box(x,y,w,h))
return boxes
def bbox2cxcy(bb_boxes):
gta = np.zeros((len(bb_boxes), 5))
for i in range(len(bb_boxes)):
gta[i,0] = bb_boxes[i,0] + (bb_boxes[i,2] - bb_boxes[i,0]) / 2
gta[i,1] = bb_boxes[i,1] + (bb_boxes[i,3] - bb_boxes[i,1]) / 2
gta[i,2] = (bb_boxes[i,2] - bb_boxes[i,0])
gta[i,3] = (bb_boxes[i,3] - bb_boxes[i,1])
gta[i,4] = bb_boxes[i,4]
return gta
def bbox_transform(bb_boxes, is_df = True):
"""
convert the x_center, y_center, w, h to xmin, ymin, xmax, ymax type
"""
gta = np.zeros((len(bb_boxes), 5))
#print(gta.shape)
if(is_df):
for i in range(len(bb_boxes)):
"""
gta index:
0: xmin -> x_center - (w / 2.)
1: ymin -> y_center - (h / 2.)
2: xmax -> x_center + (w / 2.)
3: ymax -> y_center + (h / 2.)
"""
gta[i,0] = bb_boxes.iloc[i]['x_center'] - (bb_boxes.iloc[i]['w'] / 2.)
gta[i,1] = bb_boxes.iloc[i]['y_center'] - (bb_boxes.iloc[i]['h'] / 2.)
gta[i,2] = bb_boxes.iloc[i]['x_center'] + (bb_boxes.iloc[i]['w'] / 2.)
gta[i,3] = bb_boxes.iloc[i]['y_center'] + (bb_boxes.iloc[i]['h'] / 2.)
gta[i,4] = bb_boxes.iloc[i]['label']
else:
for i in range(len(bb_boxes)):
cx = bb_boxes[i,0]
cy = bb_boxes[i,1]
w = bb_boxes[i,2]
h = bb_boxes[i,3]
gta[i,0] = cx - (w / 2.)
gta[i,1] = cy - (h / 2.)
gta[i,2] = cx + (w / 2.)
gta[i,3] = cy + (h / 2.)
#gta[i,4] = bb_boxes[i,4]
return gta#data
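# Hedged example of the corner conversion (toy box, not dataset values):
# bbox_transform(np.array([[50., 40., 20., 10.]]), is_df=False)
# -> array([[40., 35., 60., 45., 0.]])  (a 20x10 box centred at (50, 40))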
| apache-2.0 |
harisbal/pandas | pandas/tests/tslibs/test_array_to_datetime.py | 3 | 7110 | # -*- coding: utf-8 -*-
from datetime import datetime, date
import numpy as np
import pytest
import pytz
from dateutil.tz.tz import tzoffset
from pandas._libs import tslib
from pandas.compat.numpy import np_array_datetime64_compat
import pandas.util.testing as tm
class TestParseISO8601(object):
@pytest.mark.parametrize('date_str, exp', [
('2011-01-02', datetime(2011, 1, 2)),
('2011-1-2', datetime(2011, 1, 2)),
('2011-01', datetime(2011, 1, 1)),
('2011-1', datetime(2011, 1, 1)),
('2011 01 02', datetime(2011, 1, 2)),
('2011.01.02', datetime(2011, 1, 2)),
('2011/01/02', datetime(2011, 1, 2)),
('2011\\01\\02', datetime(2011, 1, 2)),
('2013-01-01 05:30:00', datetime(2013, 1, 1, 5, 30)),
('2013-1-1 5:30:00', datetime(2013, 1, 1, 5, 30))])
def test_parsers_iso8601(self, date_str, exp):
# GH#12060
# test only the iso parser - flexibility to different
# separators and leadings 0s
# Timestamp construction falls back to dateutil
actual = tslib._test_parse_iso8601(date_str)
assert actual == exp
@pytest.mark.parametrize(
'date_str',
['2011-01/02', '2011^11^11',
'201401', '201111', '200101',
# mixed separated and unseparated
'2005-0101', '200501-01',
'20010101 12:3456',
'20010101 1234:56',
# HHMMSS must have two digits in
# each component if unseparated
'20010101 1', '20010101 123',
'20010101 12345', '20010101 12345Z',
# wrong separator for HHMMSS
'2001-01-01 12-34-56'])
def test_parsers_iso8601_invalid(self, date_str):
# separators must all match - YYYYMM not valid
with pytest.raises(ValueError):
tslib._test_parse_iso8601(date_str)
class TestArrayToDatetime(object):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = ['2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000']
tm.assert_numpy_array_equal(
result,
np_array_datetime64_compat(expected, dtype='M8[ns]'))
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = ['2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000']
tm.assert_numpy_array_equal(
result,
np_array_datetime64_compat(expected, dtype='M8[ns]'))
@pytest.mark.parametrize('dt_string, expected_tz', [
['01-01-2013 08:00:00+08:00', pytz.FixedOffset(480)],
['2013-01-01T08:00:00.000000000+0800', pytz.FixedOffset(480)],
['2012-12-31T16:00:00.000000000-0800', pytz.FixedOffset(-480)],
['12-31-2012 23:00:00-01:00', pytz.FixedOffset(-60)]])
def test_parsing_timezone_offsets(self, dt_string, expected_tz):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
arr = np.array(['01-01-2013 00:00:00'], dtype=object)
expected, _ = tslib.array_to_datetime(arr)
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is expected_tz
def test_parsing_non_iso_timezone_offset(self):
dt_string = '01-01-2013T00:00:00.000000000+0000'
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
expected = np.array([np.datetime64('2013-01-01 00:00:00.000000000')])
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(0)
def test_parsing_different_timezone_offsets(self):
# GH 17697
data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"]
data = np.array(data, dtype=object)
result, result_tz = tslib.array_to_datetime(data)
expected = np.array([datetime(2015, 11, 18, 15, 30,
tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 15, 30,
tzinfo=tzoffset(None, 23400))],
dtype=object)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is None
def test_number_looking_strings_not_into_datetime(self):
# GH#4601
# These strings don't look like datetimes so they shouldn't be
# attempted to be converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors='ignore')
tm.assert_numpy_array_equal(result, arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors='ignore')
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize('invalid_date', [
date(1000, 1, 1),
datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01')])
def test_coerce_outside_ns_bounds(self, invalid_date):
arr = np.array([invalid_date], dtype='object')
with pytest.raises(ValueError):
tslib.array_to_datetime(arr, errors='raise')
result, _ = tslib.array_to_datetime(arr, errors='coerce')
expected = np.array([tslib.iNaT], dtype='M8[ns]')
tm.assert_numpy_array_equal(result, expected)
def test_coerce_outside_ns_bounds_one_valid(self):
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors='coerce')
expected = [tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000']
tm.assert_numpy_array_equal(
result,
np_array_datetime64_compat(expected, dtype='M8[ns]'))
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
result, _ = tslib.array_to_datetime(arr, errors='ignore')
tm.assert_numpy_array_equal(result, arr)
# With coercing, the invalid dates becomes iNaT
result, _ = tslib.array_to_datetime(arr, errors='coerce')
expected = ['2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT]
tm.assert_numpy_array_equal(
result,
np_array_datetime64_compat(expected, dtype='M8[ns]'))
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(['2262-04-11 23:47:16.854775808'], dtype=object)
with pytest.raises(tslib.OutOfBoundsDatetime):
tslib.array_to_datetime(arr)
| bsd-3-clause |
jayflo/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.2/tutorials/ecc.py | 1 | 2155 | #!/usr/bin/env python
# coding: utf-8
# Eccentricity (Volume Conservation)
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Relevant Parameters
# ----------------------------
#
# In[3]:
print(b.get_parameter(qualifier='ecc'))
# In[4]:
print(b.get_parameter(qualifier='ecosw', context='component'))
# In[5]:
print(b.get_parameter(qualifier='esinw', context='component'))
# Relevant Constraints
# -----------------------------
# In[6]:
print(b.get_parameter(qualifier='ecosw', context='constraint'))
# In[7]:
print(b.get_parameter(qualifier='esinw', context='constraint'))
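# Hedged note (not part of the original notebook): these constraints encode
# ecosw = ecc * cos(per0) and esinw = ecc * sin(per0), so with ecc=0.2 and
# per0=0 deg one would get ecosw=0.2 and esinw=0.0.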
# Influence on Meshes (volume conservation)
# ----------------------------
#
# In[8]:
b.add_dataset('mesh', times=np.linspace(0,1,11), columns=['volume'])
# In[9]:
b.set_value('ecc', 0.2)
# In[10]:
b.run_compute()
# In[11]:
print(b['volume@primary@model'])
# In[12]:
afig, mplfig = b['mesh01'].plot(x='times', y='volume', show=True)
# In[13]:
b.remove_dataset('mesh01')
# Influence on Radial Velocities
# ----------------------------------
#
# In[14]:
b.add_dataset('rv', times=np.linspace(0,1,51))
# In[15]:
b.run_compute()
# In[16]:
afig, mplfig = b['rv@model'].plot(show=True)
# In[17]:
b.remove_dataset('rv01')
# Influence on Light Curves (fluxes)
# -----------------------------------------
#
# In[18]:
b.add_dataset('lc', times=np.linspace(0,1,51))
# In[19]:
b.run_compute()
# In[20]:
afig, mplfig = b['lc@model'].plot(show=True)
| gpl-3.0 |
pydata/xarray | xarray/core/combine.py | 1 | 33931 | import itertools
import warnings
from collections import Counter
import pandas as pd
from . import dtypes
from .concat import concat
from .dataarray import DataArray
from .dataset import Dataset
from .merge import merge
from .utils import iterate_nested
def _infer_concat_order_from_positions(datasets):
return dict(_infer_tile_ids_from_nested_list(datasets, ()))
def _infer_tile_ids_from_nested_list(entry, current_pos):
"""
Given a list of lists (of lists...) of objects, returns an iterator
which returns a tuple containing the index of each object in the nested
list structure as the key, and the object. This can then be called by the
dict constructor to create a dictionary of the objects organised by their
position in the original nested list.
Recursively traverses the given structure, while keeping track of the
current position. Should work for any type of object which isn't a list.
Parameters
----------
entry : list[list[obj, obj, ...], ...]
List of lists of arbitrary depth, containing objects in the order
they are to be concatenated.
Returns
-------
combined_tile_ids : dict[tuple(int, ...), obj]
"""
if isinstance(entry, list):
for i, item in enumerate(entry):
yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,))
else:
yield current_pos, entry
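# Hedged sketch of the traversal with toy objects (not real datasets):
# dict(_infer_tile_ids_from_nested_list([["a", "b"], ["c", "d"]], ()))
# -> {(0, 0): "a", (0, 1): "b", (1, 0): "c", (1, 1): "d"}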
def _ensure_same_types(series, dim):
if series.dtype == object:
types = set(series.map(type))
if len(types) > 1:
types = ", ".join(t.__name__ for t in types)
raise TypeError(
f"Cannot combine along dimension '{dim}' with mixed types."
f" Found: {types}."
)
def _infer_concat_order_from_coords(datasets):
concat_dims = []
tile_ids = [() for ds in datasets]
# All datasets have same variables because they've been grouped as such
ds0 = datasets[0]
for dim in ds0.dims:
# Check if dim is a coordinate dimension
if dim in ds0:
# Need to read coordinate values to do ordering
indexes = [ds.xindexes.get(dim) for ds in datasets]
if any(index is None for index in indexes):
raise ValueError(
"Every dimension needs a coordinate for "
"inferring concatenation order"
)
# TODO (benbovy, flexible indexes): all indexes should be Pandas.Index
# get pd.Index objects from Index objects
indexes = [index.array for index in indexes]
# If dimension coordinate values are same on every dataset then
# should be leaving this dimension alone (it's just a "bystander")
if not all(index.equals(indexes[0]) for index in indexes[1:]):
# Infer order datasets should be arranged in along this dim
concat_dims.append(dim)
if all(index.is_monotonic_increasing for index in indexes):
ascending = True
elif all(index.is_monotonic_decreasing for index in indexes):
ascending = False
else:
raise ValueError(
"Coordinate variable {} is neither "
"monotonically increasing nor "
"monotonically decreasing on all datasets".format(dim)
)
# Assume that any two datasets whose coord along dim starts
# with the same value have the same coord values throughout.
if any(index.size == 0 for index in indexes):
raise ValueError("Cannot handle size zero dimensions")
first_items = pd.Index([index[0] for index in indexes])
series = first_items.to_series()
# ensure series does not contain mixed types, e.g. cftime calendars
_ensure_same_types(series, dim)
# Sort datasets along dim
# We want rank but with identical elements given identical
# position indices - they should be concatenated along another
# dimension, not along this one
rank = series.rank(
method="dense", ascending=ascending, numeric_only=False
)
order = rank.astype(int).values - 1
# Append positions along extra dimension to structure which
# encodes the multi-dimensional concatenation order
tile_ids = [
tile_id + (position,) for tile_id, position in zip(tile_ids, order)
]
if len(datasets) > 1 and not concat_dims:
raise ValueError(
"Could not find any dimension coordinates to use to "
"order the datasets for concatenation"
)
combined_ids = dict(zip(tile_ids, datasets))
return combined_ids, concat_dims
def _check_dimension_depth_tile_ids(combined_tile_ids):
"""
Check all tuples are the same length, i.e. check that all lists are
nested to the same depth.
"""
tile_ids = combined_tile_ids.keys()
nesting_depths = [len(tile_id) for tile_id in tile_ids]
if not nesting_depths:
nesting_depths = [0]
if set(nesting_depths) != {nesting_depths[0]}:
raise ValueError(
"The supplied objects do not form a hypercube because"
" sub-lists do not have consistent depths"
)
# return these just to be reused in _check_shape_tile_ids
return tile_ids, nesting_depths
def _check_shape_tile_ids(combined_tile_ids):
"""Check all lists along one dimension are same length."""
tile_ids, nesting_depths = _check_dimension_depth_tile_ids(combined_tile_ids)
for dim in range(nesting_depths[0]):
indices_along_dim = [tile_id[dim] for tile_id in tile_ids]
occurrences = Counter(indices_along_dim)
if len(set(occurrences.values())) != 1:
raise ValueError(
"The supplied objects do not form a hypercube "
"because sub-lists do not have consistent "
"lengths along dimension" + str(dim)
)
def _combine_nd(
combined_ids,
concat_dims,
data_vars="all",
coords="different",
compat="no_conflicts",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Combines an N-dimensional structure of datasets into one by applying a
series of either concat and merge operations along each dimension.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
    combined_ids : Dict[Tuple[int, ...], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match the length of the tuples used as
keys in combined_ids. If the string is a dimension name then concat
along that dimension, if it is None then merge.
Returns
-------
combined_ds : xarray.Dataset
"""
example_tile_id = next(iter(combined_ids.keys()))
n_dims = len(example_tile_id)
if len(concat_dims) != n_dims:
raise ValueError(
"concat_dims has length {} but the datasets "
"passed are nested in a {}-dimensional structure".format(
len(concat_dims), n_dims
)
)
# Each iteration of this loop reduces the length of the tile_ids tuples
# by one. It always combines along the first dimension, removing the first
# element of the tuple
for concat_dim in concat_dims:
combined_ids = _combine_all_along_first_dim(
combined_ids,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
(combined_ds,) = combined_ids.values()
return combined_ds
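# Sketch of the reduction performed above (hypothetical ids): with
# concat_dims = ["x", "y"], the keys shrink from {(0, 0), (0, 1), (1, 0), (1, 1)}
# to {(0,), (1,)} after combining along "x", and to {()} after "y", leaving a
# single combined dataset.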
def _combine_all_along_first_dim(
combined_ids,
dim,
data_vars,
coords,
compat,
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
# Group into lines of datasets which must be combined along dim
# need to sort by _new_tile_id first for groupby to work
    # TODO: is the sort needed?
combined_ids = dict(sorted(combined_ids.items(), key=_new_tile_id))
grouped = itertools.groupby(combined_ids.items(), key=_new_tile_id)
# Combine all of these datasets along dim
new_combined_ids = {}
for new_id, group in grouped:
combined_ids = dict(sorted(group))
datasets = combined_ids.values()
new_combined_ids[new_id] = _combine_1d(
datasets, dim, compat, data_vars, coords, fill_value, join, combine_attrs
)
return new_combined_ids
def _combine_1d(
datasets,
concat_dim,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Applies either concat or merge to 1D list of datasets depending on value
of concat_dim
"""
if concat_dim is not None:
try:
combined = concat(
datasets,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
except ValueError as err:
if "encountered unexpected variable" in str(err):
raise ValueError(
"These objects cannot be combined using only "
"xarray.combine_nested, instead either use "
"xarray.combine_by_coords, or do it manually "
"with xarray.concat, xarray.merge and "
"xarray.align"
)
else:
raise
else:
combined = merge(
datasets,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
return combined
def _new_tile_id(single_id_ds_pair):
tile_id, ds = single_id_ds_pair
return tile_id[1:]
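# e.g. a tile id of (0, 1) maps to (1,): datasets sharing this trailing key are
# exactly the ones that get concatenated together along the current first
# dimension in _combine_all_along_first_dim.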
def _nested_combine(
datasets,
concat_dims,
compat,
data_vars,
coords,
ids,
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
if len(datasets) == 0:
return Dataset()
# Arrange datasets for concatenation
# Use information from the shape of the user input
if not ids:
# Determine tile_IDs by structure of input in N-D
# (i.e. ordering in list-of-lists)
combined_ids = _infer_concat_order_from_positions(datasets)
else:
# Already sorted so just use the ids already passed
combined_ids = dict(zip(ids, datasets))
# Check that the inferred shape is combinable
_check_shape_tile_ids(combined_ids)
# Apply series of concatenate or merge operations along each dimension
combined = _combine_nd(
combined_ids,
concat_dims,
compat=compat,
data_vars=data_vars,
coords=coords,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
return combined
def combine_nested(
datasets,
concat_dim,
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
):
"""
Explicitly combine an N-dimensional grid of datasets into one by using a
succession of concat and merge operations along each dimension of the grid.
Does not sort the supplied datasets under any circumstances, so the
datasets must be passed in the order you wish them to be concatenated. It
does align coordinates, but different variables on datasets can cause it to
fail under some scenarios. In complex cases, you may need to clean up your
data and use concat/merge explicitly.
To concatenate along multiple dimensions the datasets must be passed as a
nested list-of-lists, with a depth equal to the length of ``concat_dims``.
``combine_nested`` will concatenate along the top-level list first.
Useful for combining datasets from a set of nested directories, or for
collecting the output of a simulation parallelized along multiple
dimensions.
Parameters
----------
datasets : list or nested list of Dataset
Dataset objects to combine.
If concatenation or merging along more than one dimension is desired,
then datasets must be supplied in a nested list-of-lists.
concat_dim : str, or list of str, DataArray, Index or None
Dimensions along which to concatenate variables, as used by
:py:func:`xarray.concat`.
Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
and merge instead along a particular dimension.
The position of ``None`` in the list specifies the dimension of the
nested-list input along which to merge.
Must be the same length as the depth of the list passed to
``datasets``.
compat : {"identical", "equals", "broadcast_equals", \
"no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential merge conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
coords : {"minimal", "different", "all" or list of str}, optional
Details are in the documentation of concat
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset
Examples
--------
A common task is collecting data from a parallelized simulation in which
each process wrote out to a separate file. A domain which was decomposed
into 4 parts, 2 each along both the x and y axes, requires organising the
    datasets into a doubly-nested list, e.g.:
>>> x1y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x1y1
<xarray.Dataset>
Dimensions: (x: 2, y: 2)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 1.764 0.4002 0.9787 2.241
precipitation (x, y) float64 1.868 -0.9773 0.9501 -0.1514
>>> x1y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y1 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> x2y2 = xr.Dataset(
... {
... "temperature": (("x", "y"), np.random.randn(2, 2)),
... "precipitation": (("x", "y"), np.random.randn(2, 2)),
... }
... )
>>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
>>> combined
<xarray.Dataset>
Dimensions: (x: 4, y: 4)
Dimensions without coordinates: x, y
Data variables:
temperature (x, y) float64 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
precipitation (x, y) float64 1.868 -0.9773 0.761 ... -0.7422 0.1549 0.3782
``combine_nested`` can also be used to explicitly merge datasets with
different variables. For example if we have 4 datasets, which are divided
along two times, and contain two different variables, we can pass ``None``
to ``concat_dim`` to specify the dimension of the nested list over which
we wish to use ``merge`` instead of ``concat``:
>>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t1temp
<xarray.Dataset>
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 -0.8878 -1.981 -0.3479 0.1563 1.23
>>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> t1precip
<xarray.Dataset>
Dimensions: (t: 5)
Dimensions without coordinates: t
Data variables:
precipitation (t) float64 1.202 -0.3873 -0.3023 -1.049 -1.42
>>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
>>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
>>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
>>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
>>> combined
<xarray.Dataset>
Dimensions: (t: 10)
Dimensions without coordinates: t
Data variables:
temperature (t) float64 -0.8878 -1.981 -0.3479 ... -0.5097 -0.4381 -1.253
precipitation (t) float64 1.202 -0.3873 -0.3023 ... -0.2127 -0.8955 0.3869
See also
--------
concat
merge
"""
mixed_datasets_and_arrays = any(
isinstance(obj, Dataset) for obj in iterate_nested(datasets)
) and any(
isinstance(obj, DataArray) and obj.name is None
for obj in iterate_nested(datasets)
)
if mixed_datasets_and_arrays:
raise ValueError("Can't combine datasets with unnamed arrays.")
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim]
# The IDs argument tells _nested_combine that datasets aren't yet sorted
return _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=False,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
def vars_as_keys(ds):
return tuple(sorted(ds))
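# e.g. a dataset holding "precipitation" and "temperature" is keyed as
# ("precipitation", "temperature"); datasets with identical variable sets
# therefore group together before being combined in combine_by_coords below.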
def _combine_single_variable_hypercube(
datasets,
fill_value=dtypes.NA,
data_vars="all",
coords="different",
compat="no_conflicts",
join="outer",
combine_attrs="no_conflicts",
):
"""
Attempt to combine a list of Datasets into a hypercube using their
coordinates.
All provided Datasets must belong to a single variable, ie. must be
assigned the same variable name. This precondition is not checked by this
function, so the caller is assumed to know what it's doing.
This function is NOT part of the public API.
"""
if len(datasets) == 0:
raise ValueError(
"At least one Dataset is required to resolve variable names "
"for combined hypercube."
)
combined_ids, concat_dims = _infer_concat_order_from_coords(list(datasets))
if fill_value is None:
# check that datasets form complete hypercube
_check_shape_tile_ids(combined_ids)
else:
# check only that all datasets have same dimension depth for these
# vars
_check_dimension_depth_tile_ids(combined_ids)
# Concatenate along all of concat_dims one by one to create single ds
concatenated = _combine_nd(
combined_ids,
concat_dims=concat_dims,
data_vars=data_vars,
coords=coords,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
# Check the overall coordinates are monotonically increasing
for dim in concat_dims:
indexes = concatenated.indexes.get(dim)
if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing):
raise ValueError(
"Resulting object does not have monotonic"
" global indexes along dimension {}".format(dim)
)
return concatenated
# TODO remove empty list default param after version 0.19, see PR4696
def combine_by_coords(
data_objects=[],
compat="no_conflicts",
data_vars="all",
coords="different",
fill_value=dtypes.NA,
join="outer",
combine_attrs="no_conflicts",
datasets=None,
):
"""
Attempt to auto-magically combine the given datasets (or data arrays)
into one by using dimension coordinates.
This method attempts to combine a group of datasets along any number of
dimensions into a single entity by inspecting coords and metadata and using
a combination of concat and merge.
Will attempt to order the datasets such that the values in their dimension
coordinates are monotonic along all dimensions. If it cannot determine the
order in which to concatenate the datasets, it will raise a ValueError.
Non-coordinate dimensions will be ignored, as will any coordinate
dimensions which do not vary between each dataset.
Aligns coordinates, but different variables on datasets can cause it
to fail under some scenarios. In complex cases, you may need to clean up
your data and use concat/merge explicitly (also see `combine_nested`).
Works well if, for example, you have N years of data and M data variables,
and each combination of a distinct time period and set of data variables is
saved as its own dataset. Also useful for if you have a simulation which is
parallelized in multiple dimensions, but has global coordinates saved in
each file specifying the positions of points within the global domain.
Parameters
----------
data_objects : sequence of xarray.Dataset or sequence of xarray.DataArray
Data objects to combine.
compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- "broadcast_equals": all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- "equals": all values and dimensions must be the same.
- "identical": all values, dimensions and attributes must be the
same.
- "no_conflicts": only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
- "override": skip comparing and pick variable from first dataset
data_vars : {"minimal", "different", "all" or list of str}, optional
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
appears are included.
* "different": Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* "all": All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the "minimal" data variables.
If objects are DataArrays, `data_vars` must be "all".
coords : {"minimal", "different", "all"} or list of str, optional
As per the "data_vars" kwarg, but for coordinate variables.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names to fill values. Use a data array's name to
refer to its values. If None, raises a ValueError if
the passed Datasets do not create a complete hypercube.
join : {"outer", "inner", "left", "right", "exact"}, optional
String indicating how to combine differing indexes in objects
- "outer": use the union of object indexes
- "inner": use the intersection of object indexes
- "left": use indexes from the first object with each dimension
- "right": use indexes from the last object with each dimension
- "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"} or callable, default: "drop"
A callable or a string indicating how to combine attrs of the objects being
merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
If a callable, it must expect a sequence of ``attrs`` dicts and a context object
as its only parameters.
Returns
-------
combined : xarray.Dataset
See also
--------
concat
merge
combine_nested
Examples
--------
Combining two datasets using their common dimension coordinates. Notice
they are concatenated based on the values in their dimension coordinates,
not on their position in the list passed to `combine_by_coords`.
>>> x1 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [0, 1], "x": [10, 20, 30]},
... )
>>> x2 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [10, 20, 30]},
... )
>>> x3 = xr.Dataset(
... {
... "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
... "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
... },
... coords={"y": [2, 3], "x": [40, 50, 60]},
... )
>>> x1
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 8.473 12.92
precipitation (y, x) float64 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289
>>> x2
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 11.36 18.51 1.421 1.743 0.4044 16.65
precipitation (y, x) float64 0.7782 0.87 0.9786 0.7992 0.4615 0.7805
>>> x3
<xarray.Dataset>
Dimensions: (y: 2, x: 3)
Coordinates:
* y (y) int64 2 3
* x (x) int64 40 50 60
Data variables:
temperature (y, x) float64 2.365 12.8 2.867 18.89 10.44 8.293
precipitation (y, x) float64 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x2, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 3)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 10.9 ... 1.743 0.4044 16.65
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.7992 0.4615 0.7805
>>> xr.combine_by_coords([x3, x1])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... nan 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x3, x1], join="override")
<xarray.Dataset>
Dimensions: (y: 2, x: 6)
Coordinates:
* y (y) int64 0 1
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 2.365 ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
>>> xr.combine_by_coords([x1, x2, x3])
<xarray.Dataset>
Dimensions: (y: 4, x: 6)
Coordinates:
* y (y) int64 0 1 2 3
* x (x) int64 10 20 30 40 50 60
Data variables:
temperature (y, x) float64 10.98 14.3 12.06 nan ... 18.89 10.44 8.293
precipitation (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176
"""
# TODO remove after version 0.19, see PR4696
if datasets is not None:
warnings.warn(
"The datasets argument has been renamed to `data_objects`."
" In future passing a value for datasets will raise an error."
)
data_objects = datasets
if not data_objects:
return Dataset()
mixed_arrays_and_datasets = any(
isinstance(data_object, DataArray) and data_object.name is None
for data_object in data_objects
) and any(isinstance(data_object, Dataset) for data_object in data_objects)
if mixed_arrays_and_datasets:
raise ValueError("Can't automatically combine datasets with unnamed arrays.")
all_unnamed_data_arrays = all(
isinstance(data_object, DataArray) and data_object.name is None
for data_object in data_objects
)
if all_unnamed_data_arrays:
unnamed_arrays = data_objects
temp_datasets = [data_array._to_temp_dataset() for data_array in unnamed_arrays]
combined_temp_dataset = _combine_single_variable_hypercube(
temp_datasets,
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
return DataArray()._from_temp_dataset(combined_temp_dataset)
else:
# Group by data vars
sorted_datasets = sorted(data_objects, key=vars_as_keys)
grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
# Perform the multidimensional combine on each group of data variables
# before merging back together
concatenated_grouped_by_data_vars = []
for vars, datasets_with_same_vars in grouped_by_vars:
concatenated = _combine_single_variable_hypercube(
list(datasets_with_same_vars),
fill_value=fill_value,
data_vars=data_vars,
coords=coords,
compat=compat,
join=join,
combine_attrs=combine_attrs,
)
concatenated_grouped_by_data_vars.append(concatenated)
return merge(
concatenated_grouped_by_data_vars,
compat=compat,
fill_value=fill_value,
join=join,
combine_attrs=combine_attrs,
)
| apache-2.0 |
erramuzpe/NeuroVault | neurovault/settings.py | 2 | 11986 | # Django settings for neurovault project.
import os
import sys
import tempfile
from datetime import timedelta
import matplotlib
from kombu import Exchange, Queue
matplotlib.use('Agg')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
DOMAIN_NAME = "http://neurovault.org"
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Chris', '[email protected]'),
)
MANAGERS = ADMINS
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'postgres',
# The following settings are not used with sqlite3:
'USER': 'postgres',
'HOST': 'db', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '5432', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/public/media/'
PRIVATE_MEDIA_ROOT = '/var/www/image_data'
PRIVATE_MEDIA_URL = '/media/images'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'neurovault.apps.statmaps.middleware.CollectionRedirectMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'neurovault.urls'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'neurovault.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (),
'OPTIONS': {'context_processors': ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
'django.core.context_processors.request'),
'loaders': ('hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)}
}
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'neurovault.apps.main',
'neurovault.apps.statmaps',
'neurovault.apps.users',
'django.contrib.sitemaps',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
#'django.contrib.admindocs',
'social.apps.django_app.default',
'rest_framework',
'taggit',
'crispy_forms',
'coffeescript',
'taggit_templatetags',
#'south',
'dbbackup',
'polymorphic',
'djcelery',
'django_cleanup',
'file_resubmit',
'django_mailgun',
'django_hstore',
'guardian',
'oauth2_provider',
'fixture_media'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'filters': {
# 'require_debug_false': {
# '()': 'django.utils.log.RequireDebugFalse'
# }
# },
# 'handlers': {
# 'mail_admins': {
# 'level': 'ERROR',
# 'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
# }
# },
# 'loggers': {
# 'django.request': {
# 'handlers': ['mail_admins'],
# 'level': 'ERROR',
# 'propagate': True,
# },
# }
# }
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'guardian.backends.ObjectPermissionBackend',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email', # <--- enable this one
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# LimitOffsetPagination will allow to set a ?limit= and ?offset=
# variable in the URL.
'DEFAULT_PAGINATION_CLASS':
'neurovault.api.pagination.StandardResultPagination',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_RENDERER_CLASSES': (
'neurovault.api.utils.ExplicitUnicodeJSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'UNICODE_JSON': True,
}
OAUTH2_PROVIDER = {
'REQUEST_APPROVAL_PROMPT': 'auto'
}
LOGIN_REDIRECT_URL = '/my_collections/'
#LOGIN_URL = '/login-form/'
#LOGIN_ERROR_URL = '/login-error/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DBBACKUP_STORAGE = 'dbbackup.storage.dropbox_storage'
DBBACKUP_TOKENS_FILEPATH = '/home/filo/dbtokens'
DBBACKUP_POSTGRES_BACKUP_COMMAND = 'export PGPASSWORD=neurovault\n pg_dump --username={adminuser} --host={host} --port={port} {databasename} >'
# For Apache, use 'sendfile.backends.xsendfile'
# For Nginx, use 'sendfile.backends.nginx'
# For Devserver, use 'sendfile.backends.development'
SENDFILE_BACKEND = 'sendfile.backends.development'
PRIVATE_MEDIA_REDIRECT_HEADER = 'X-Accel-Redirect'
PYCORTEX_DATASTORE = os.path.join(BASE_DIR,'pycortex_data')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
"file_resubmit": {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
"LOCATION": '/tmp/file_resubmit/'
}
}
# Mandrill config
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = 'key-3ax6xnjp29jd6fds4gc373sgvjxteol0' # replace with a real key in production
MAILGUN_SERVER_NAME = 'samples.mailgun.org'# replace with 'neurovault.org' in production
DEFAULT_FROM_EMAIL = "[email protected]"
if os.path.exists('/usr/local/share/pycortex/db/fsaverage'):
STATICFILES_DIRS = (
('pycortex-resources', '/usr/local/lib/python2.7/site-packages/cortex/webgl/resources'),
('pycortex-ctmcache', '/usr/local/share/pycortex/db/fsaverage/cache')
)
# Celery config
BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
)
CELERY_IMPORTS = ('neurovault.apps.statmaps.tasks', )
CELERYBEAT_SCHEDULE = {
'anima_crawl_every day': {
'task': 'crawl_anima',
'schedule': timedelta(days=1)
},
}
CELERY_TIMEZONE = 'Europe/Berlin'
ANONYMOUS_USER_ID = -1
DEFAULT_OAUTH_APPLICATION_ID = -1
DEFAULT_OAUTH_APP_NAME = 'DefaultOAuthApp'
DEFAULT_OAUTH_APP_OWNER_ID = -2
DEFAULT_OAUTH_APP_OWNER_USERNAME = 'DefaultAppOwner'
OAUTH_PERSONAL_TOKEN_LENGTH = 40
# Bogus secret key.
try:
from secrets import *
except ImportError:
from bogus_secrets import *
try:
from local_settings import *
except ImportError:
pass
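# A minimal local_settings.py override might look like (hypothetical values):
#   DEBUG = False
#   ALLOWED_HOSTS = ["neurovault.org"]
#   MAILGUN_ACCESS_KEY = "real-production-key"
# Because it is star-imported last, anything defined there replaces the
# defaults assigned earlier in this file.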
# freesurfer/pycortex environment
os.environ["XDG_CONFIG_HOME"] = PYCORTEX_DATASTORE
os.environ["FREESURFER_HOME"] = "/opt/freesurfer"
os.environ["SUBJECTS_DIR"] = os.path.join(os.environ["FREESURFER_HOME"],"subjects")
os.environ["FSLOUTPUTTYPE"] = "NIFTI_GZ"
# provToolbox path
os.environ["PATH"] += os.pathsep + '/path/to/lib/provToolbox/bin'
#CELERYBEAT_SCHEDULE = {
# 'run_make_correlation_df': {
# 'task': 'neurovault.apps.statmaps.tasks...',
# 'schedule': timedelta(minutes=30),
# },
#}
# or manage periodic schedule in django admin
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
if "test" in sys.argv or "benchmark" in sys.argv:
test_media_root = os.path.join(tempfile.mkdtemp(prefix="neurovault_test_"))
PRIVATE_MEDIA_ROOT = test_media_root
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
TAGGIT_CASE_INSENSITIVE=True
FIXTURE_DIRS = (
'apps/statmaps/fixtures/',
)
MEDIA_ROOT = PRIVATE_MEDIA_ROOT
| mit |
jakobworldpeace/scikit-learn | sklearn/tree/export.py | 35 | 16873 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
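# e.g. _color_brew(3) returns three (R, G, B) tuples whose hues are spaced
# 360 / n = 120 degrees apart; the exact channel values follow from the fixed
# saturation and value chosen inside the function.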
class Sentinel(object):
    def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
        returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/tutorials/plot_epochs_to_data_frame.py | 12 | 8847 | """
.. _tut_io_export_pandas:
=================================
Export epochs to Pandas DataFrame
=================================
In this example the pandas exporter will be used to produce a DataFrame
object. After exploring some basic features a split-apply-combine
work flow will be conducted to examine the latencies of the response
maxima across epochs and conditions.
Note. Equivalent methods are available for raw and evoked data objects.
Short Pandas Primer
-------------------
Pandas Data Frames
~~~~~~~~~~~~~~~~~~
A data frame can be thought of as a combination of matrix, list and dict:
It knows about linear algebra and element-wise operations but is size mutable
and allows for labeled access to its data. In addition, the pandas data frame
class provides many useful methods for restructuring, reshaping and visualizing
data. As most methods return data frame instances, operations can be chained
with ease; this allows to write efficient one-liners. Technically a DataFrame
can be seen as a high-level container for numpy arrays and hence switching
back and forth between numpy arrays and DataFrames is very easy.
Taken together, these features qualify data frames for interoperation with
databases and for interactive data exploration / analysis.
Additionally, pandas interfaces with the R statistical computing language that
covers a huge amount of statistical functionality.
Export Options
~~~~~~~~~~~~~~
The pandas exporter comes with a few options worth being commented.
Pandas DataFrame objects use a so called hierarchical index. This can be
thought of as an array of unique tuples, in our case, representing the higher
dimensional MEG data in a 2D data table. The column names are the channel names
from the epoch object. The channels can be accessed like entries of a
dictionary:
df['MEG 2333']
Epochs and time slices can be accessed with the .ix method:
epochs_df.ix[(1, 2), 'MEG 2333']
However, it is also possible to include this index as regular categorical data
columns which yields a long table format typically used for repeated measure
designs. To take control of this feature, on export, you can specify which
of the three dimensions 'condition', 'epoch' and 'time' is passed to the Pandas
index using the index parameter. Note that this decision is revertible any
time, as demonstrated below.
Similarly, for convenience, it is possible to scale the times, e.g. from
seconds to milliseconds.
Some Instance Methods
~~~~~~~~~~~~~~~~~~~~~
Most numpy methods and many ufuncs can be found as instance methods, e.g.
mean, median, var, std, mul, max, argmax, etc.
Below an incomplete listing of additional useful data frame instance methods:
apply : apply function to data.
Any kind of custom function can be applied to the data. In combination with
lambda this can be very useful.
describe : quickly generate summary stats
Very useful for exploring data.
groupby : generate subgroups and initialize a 'split-apply-combine' operation.
Creates a group object. Subsequently, methods like apply, agg, or transform
can be used to manipulate the underlying data separately but
simultaneously. Finally, reset_index can be used to combine the results
back into a data frame.
plot : wrapper around plt.plot
However it comes with some special options. For examples see below.
shape : shape attribute
gets the dimensions of the data frame.
values :
return underlying numpy array.
to_records :
export data as numpy record array.
to_dict :
export data as dict of arrays.
Reference
~~~~~~~~~
More information and additional introductory materials can be found at the
pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
"""
# Author: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
print(__doc__)
# turn on interactive mode
plt.ion()
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = Raw(raw_fname)
# For simplicity we will only consider the first 10 epochs
events = mne.read_events(event_fname)[:10]
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=True, reject=reject)
###############################################################################
# Export DataFrame
# The following parameters will scale the channels and times plotting
# friendly. The info columns 'epoch' and 'time' will be used as hierarchical
# index whereas the condition is treated as categorical data. Note that
# this is optional. By passing None you could also print out all nesting
# factors in a long table style commonly used for analyzing repeated measure
# designs.
index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
df = epochs.to_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
index=index)
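# The resulting frame has one row per (epoch, time) pair (the hierarchical
# index) and one column per picked channel, so e.g. df['MEG 2333'] (the channel
# used in the primer above) returns that channel's values as a Series.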
# Create MEG channel selector and drop EOG channel.
meg_chs = [c for c in df.columns if 'MEG' in c]
df.pop('EOG 061') # this works just like with a list.
###############################################################################
# Explore Pandas MultiIndex
# Pandas is using a MultiIndex or hierarchical index to handle higher
# dimensionality while at the same time representing data in a flat 2d manner.
print(df.index.names, df.index.levels)
# Inspecting the index object unveils that 'epoch', 'time' are used
# for subsetting data. We can take advantage of that by using the
# .ix attribute, where in this case the first position indexes the MultiIndex
# and the second the columns, that is, channels.
# Plot some channels across the first three epochs
xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
df.ix[:3, sel].plot(xticks=xticks)
mne.viz.tight_layout()
# slice the time starting at t0 in epoch 2 and ending 500ms after
# the baseline in epoch 3. Note that the second part of the tuple
# represents time in milliseconds from stimulus onset.
df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
mne.viz.tight_layout()
# Note: For convenience the index was converted from floating point values
# to integer values. To restore the original values you can e.g. say
# df['times'] = np.tile(epochs.times, len(epochs))
# We now reset the index of the DataFrame to expose some Pandas
# pivoting functionality. To simplify the groupby operation we drop the
# indices to treat epoch and time as categorical factors.
df = df.reset_index()
# The ensuing DataFrame then is split into subsets reflecting a crossing
# between condition and trial number. The idea is that we can broadcast
# operations into each cell simultaneously.
factors = ['condition', 'epoch']
sel = factors + ['MEG 1332', 'MEG 1342']
grouped = df[sel].groupby(factors)
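# 'grouped' holds one sub-frame per (condition, epoch) combination (the "split"
# step); an aggregation such as .mean() below collapses each sub-frame to a
# single row (the "apply" and "combine" steps).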
# To make the plot labels more readable let's edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')
# Now we compare the mean response of two channels across conditions.
grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
color=['steelblue', 'orange'])
mne.viz.tight_layout()
# We can even accomplish more complicated tasks in a few lines calling
# apply method and passing a function. Assume we wanted to know the time
# slice of the maximum response for each condition.
max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])
print(max_latency)
# Then make the plot labels more readable let's edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')
plt.figure()
max_latency.plot(kind='barh', title='Latency of Maximum Response',
color=['steelblue'])
mne.viz.tight_layout()
# Finally, we will again remove the index to create a proper data table that
# can be used with statistical packages like statsmodels or R.
final_df = max_latency.reset_index()
final_df.rename(columns={0: sel[2]}) # as the index is oblivious of names.
# The index is now written into regular columns so it can be used as factor.
print(final_df)
# To save as csv file, uncomment the next line.
# final_df.to_csv('my_epochs.csv')
# Note. Data Frames can be easily concatenated, e.g., across subjects.
# E.g. say:
#
# import pandas as pd
# group = pd.concat([df_1, df_2])
# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
| bsd-3-clause |
PyPSA/PyPSA | pypsa/plot.py | 1 | 32392 |
## Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functions for plotting networks.
"""
import pandas as pd
import numpy as np
import networkx as nx
import warnings
import logging
logger = logging.getLogger(__name__)
__author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS), Fabian Hofmann (FIAS), Fabian Neumann (KIT)"
__copyright__ = "Copyright 2015-2020 Tom Brown (FIAS), Jonas Hoersch (FIAS); Copyright 2019-2020 Fabian Hofmann (FIAS), Fabian Neumann (KIT), GNU GPL 3"
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge, Circle
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.patches import FancyArrow
cartopy_present = True
try:
import cartopy
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
import requests
except ImportError:
cartopy_present = False
pltly_present = True
try:
import plotly.offline as pltly
import plotly.graph_objects as go
except ImportError:
pltly_present = False
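# These guarded imports only set availability flags: if cartopy is missing,
# plot() below falls back to geomap=False with a warning; plotly is presumably
# needed only by the interactive plotting helpers defined later in this module.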
def plot(n, margin=None, ax=None, geomap=True, projection=None,
bus_colors='cadetblue', bus_alpha=1, bus_sizes=2e-2, bus_cmap=None,
line_colors='rosybrown', link_colors='darkseagreen',
transformer_colors='orange',
line_widths=1.5, link_widths=1.5, transformer_widths=1.5,
line_cmap=None, link_cmap=None, transformer_cmap=None,
flow=None, branch_components=None, layouter=None, title="",
boundaries=None, geometry=False, jitter=None, color_geomap=None):
"""
Plot the network buses and lines using matplotlib and cartopy.
Parameters
----------
margin : float
Margin at the sides as proportion of distance between max/min x,y
ax : matplotlib ax, defaults to plt.gca()
Axis to which to plot the network
geomap: bool/str, default True
Switch to use Cartopy and draw geographical features.
If string is passed, it will be used as a resolution argument,
valid options are '10m', '50m' and '110m'.
projection: cartopy.crs.Projection, defaults to None
Define the projection of your geomap, only valid if cartopy is
installed. If None (default) is passed the projection for cartopy
is set to cartopy.crs.PlateCarree
bus_colors : dict/pandas.Series
Colors for the buses, defaults to "cadetblue". If bus_sizes is a
pandas.Series with a Multiindex, bus_colors defaults to the
n.carriers['color'] column.
bus_alpha : float
Adds alpha channel to buses, defaults to 1.
bus_sizes : dict/pandas.Series
        Sizes of bus points, defaults to 2e-2. If a multiindexed Series is passed,
the function will draw pies for each bus (first index level) with
        segments of different color (second index level). Such a Series is
        obtained by e.g. n.generators.groupby(['bus', 'carrier']).p_nom.sum()
bus_cmap : plt.cm.ColorMap/str
If bus_colors are floats, this color map will assign the colors
line_colors : str/pandas.Series
Colors for the lines, defaults to 'rosybrown'.
link_colors : str/pandas.Series
Colors for the links, defaults to 'darkseagreen'.
    transformer_colors : str/pandas.Series
        Colors for the transformers, defaults to 'orange'.
line_widths : dict/pandas.Series
Widths of lines, defaults to 1.5
link_widths : dict/pandas.Series
Widths of links, defaults to 1.5
transformer_widths : dict/pandas.Series
        Widths of transformers, defaults to 1.5
line_cmap : plt.cm.ColorMap/str|dict
If line_colors are floats, this color map will assign the colors.
link_cmap : plt.cm.ColorMap/str|dict
If link_colors are floats, this color map will assign the colors.
transformer_cmap : plt.cm.ColorMap/str|dict
If transformer_colors are floats, this color map will assign the colors.
flow : snapshot/pandas.Series/function/string
Flow to be displayed in the plot, defaults to None. If an element of
n.snapshots is given, the flow at this timestamp will be
        displayed. If an aggregation function is given, it will be applied
to the total network flow via pandas.DataFrame.agg (accepts also
function names). Otherwise flows can be specified by passing a pandas
Series with MultiIndex including all necessary branch components.
Use the line_widths argument to additionally adjust the size of the
flow arrows.
layouter : networkx.drawing.layout function, default None
Layouting function from `networkx <https://networkx.github.io/>`_ which
overrules coordinates given in ``n.buses[['x','y']]``. See
`list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_
of available options.
title : string
Graph title
boundaries : list of four floats
Boundaries of the plot in format [x1,x2,y1,y2]
branch_components : list of str
Branch components to be plotted, defaults to Line and Link.
jitter : None|float
Amount of random noise to add to bus positions to distinguish
overlapping buses
color_geomap : dict or bool
Specify colors to paint land and sea areas in.
If True, it defaults to `{'ocean': 'lightblue', 'land': 'whitesmoke'}`.
If no dictionary is provided, colors are white.
Returns
-------
bus_collection, branch_collection1, ... : tuple of Collections
Collections for buses and branches.
"""
x, y = _get_coordinates(n, layouter=layouter)
if boundaries is None and margin:
boundaries = sum(zip(*compute_bbox_with_margins(margin, x, y)), ())
if geomap:
if not cartopy_present:
logger.warning("Cartopy needs to be installed to use `geomap=True`.")
geomap = False
if projection is None:
projection = get_projection_from_crs(n.srid)
if ax is None:
ax = plt.gca(projection=projection)
else:
assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot), (
'The passed axis is not a GeoAxesSubplot. You can '
'create one with: \nimport cartopy.crs as ccrs \n'
'fig, ax = plt.subplots('
'subplot_kw={"projection":ccrs.PlateCarree()})')
transform = draw_map_cartopy(n, x, y, ax, geomap, color_geomap)
x, y, z = ax.projection.transform_points(transform, x.values, y.values).T
x, y = pd.Series(x, n.buses.index), pd.Series(y, n.buses.index)
if boundaries is not None:
ax.set_extent(boundaries, crs=transform)
elif ax is None:
ax = plt.gca()
if not geomap and boundaries:
ax.axis(boundaries)
ax.set_aspect('equal')
ax.axis('off')
ax.set_title(title)
# Plot buses:
if jitter is not None:
x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x))
y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y))
if isinstance(bus_sizes, pd.Series) and isinstance(bus_sizes.index, pd.MultiIndex):
# We are drawing pies to show all the different shares
assert len(bus_sizes.index.levels[0].difference(n.buses.index)) == 0, \
"The first MultiIndex level of bus_sizes must contain buses"
if isinstance(bus_colors, dict):
bus_colors = pd.Series(bus_colors)
        # if bus_colors isn't a Series or dict, look in n.carriers for existing colors
if not isinstance(bus_colors, pd.Series):
bus_colors = n.carriers.color.dropna()
assert bus_sizes.index.levels[1].isin(bus_colors.index).all(), (
"Colors not defined for all elements in the second MultiIndex "
"level of bus_sizes, please make sure that all the elements are "
"included in bus_colors or in n.carriers.color")
bus_sizes = bus_sizes.sort_index(level=0, sort_remaining=False)
if geomap:
bus_sizes = bus_sizes * projected_area_factor(ax, n.srid)**2
patches = []
for b_i in bus_sizes.index.levels[0]:
s = bus_sizes.loc[b_i]
radius = s.sum()**0.5
if radius == 0.0:
ratios = s
else:
ratios = s/s.sum()
start = 0.25
for i, ratio in ratios.iteritems():
patches.append(Wedge((x.at[b_i], y.at[b_i]), radius,
360*start, 360*(start+ratio),
facecolor=bus_colors[i], alpha=bus_alpha))
start += ratio
bus_collection = PatchCollection(patches, match_original=True, zorder=5)
ax.add_collection(bus_collection)
else:
c = pd.Series(bus_colors, index=n.buses.index)
s = pd.Series(bus_sizes, index=n.buses.index, dtype="float")
if geomap:
s = s * projected_area_factor(ax, n.srid)**2
if bus_cmap is not None and c.dtype is np.dtype('float'):
if isinstance(bus_cmap, str):
bus_cmap = plt.cm.get_cmap(bus_cmap)
norm = plt.Normalize(vmin=c.min(), vmax=c.max())
c = c.apply(lambda cval: bus_cmap(norm(cval)))
patches = []
for b_i in s.index:
radius = s.at[b_i]**0.5
patches.append(Circle((x.at[b_i], y.at[b_i]), radius,
facecolor=c.at[b_i], alpha=bus_alpha))
bus_collection = PatchCollection(patches, match_original=True, zorder=5)
ax.add_collection(bus_collection)
# Plot branches:
if isinstance(line_widths, pd.Series):
if isinstance(line_widths.index, pd.MultiIndex):
            raise TypeError("Index of argument 'line_widths' is a Multiindex, "
                            "this is not supported since pypsa v0.17. "
"Set differing widths with arguments 'line_widths', "
"'link_widths' and 'transformer_widths'.")
if isinstance(line_colors, pd.Series):
if isinstance(line_colors.index, pd.MultiIndex):
            raise TypeError("Index of argument 'line_colors' is a Multiindex, "
                            "this is not supported since pypsa v0.17. "
"Set differing colors with arguments 'line_colors', "
"'link_colors' and 'transformer_colors'.")
if branch_components is None:
branch_components = n.branch_components
branch_colors = {'Line': line_colors, 'Link': link_colors,
'Transformer': transformer_colors}
branch_widths = {'Line': line_widths, 'Link': link_widths,
'Transformer': transformer_widths}
branch_cmap = {'Line': line_cmap, 'Link': link_cmap,
'Transformer': transformer_cmap}
branch_collections = []
arrow_collections = []
if flow is not None:
rough_scale = sum(len(n.df(c)) for c in branch_components) + 100
flow = _flow_ds_from_arg(flow, n, branch_components) / rough_scale
for c in n.iterate_components(branch_components):
b_widths = as_branch_series(branch_widths[c.name], 'width', c.name, n)
b_colors = as_branch_series(branch_colors[c.name], 'color', c.name, n)
b_nums = None
b_cmap = branch_cmap[c.name]
b_flow = flow.get(c.name, None) if flow is not None else None
if issubclass(b_colors.dtype.type, np.number):
b_nums = b_colors
b_colors = None
if not geometry:
segments = (np.asarray(((c.df.bus0.map(x), c.df.bus0.map(y)),
(c.df.bus1.map(x), c.df.bus1.map(y))))
.transpose(2, 0, 1))
else:
from shapely.wkt import loads
from shapely.geometry import LineString
linestrings = c.df.geometry[lambda ds: ds != ''].map(loads)
assert all(isinstance(ls, LineString) for ls in linestrings), (
"The WKT-encoded geometry in the 'geometry' column must be "
"composed of LineStrings")
segments = np.asarray(list(linestrings.map(np.asarray)))
if b_flow is not None:
coords = pd.DataFrame({'x1': c.df.bus0.map(x), 'y1': c.df.bus0.map(y),
'x2': c.df.bus1.map(x), 'y2': c.df.bus1.map(y)})
b_flow = b_flow.mul(b_widths[b_flow.index], fill_value=0)
            # update the line width; this allows setting line widths separately from flows
b_widths.update((5 * b_flow.abs()).pipe(np.sqrt))
area_factor = projected_area_factor(ax, n.srid)
f_collection = directed_flow(coords, b_flow, b_colors, area_factor,
b_cmap)
if b_nums is not None:
f_collection.set_array(np.asarray(b_nums))
f_collection.set_cmap(b_cmap)
f_collection.autoscale()
arrow_collections.append(f_collection)
ax.add_collection(f_collection)
b_collection = LineCollection(segments, linewidths=b_widths,
antialiaseds=(1,), colors=b_colors,
transOffset=ax.transData)
if b_nums is not None:
b_collection.set_array(np.asarray(b_nums))
b_collection.set_cmap(b_cmap)
b_collection.autoscale()
ax.add_collection(b_collection)
b_collection.set_zorder(3)
branch_collections.append(b_collection)
if boundaries is None:
ax.autoscale()
return (bus_collection,) + tuple(branch_collections) + tuple(arrow_collections)
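def _example_plot_usage(n):
    # Illustrative sketch only, not part of the public API: typical calls to
    # ``plot`` for a solved pypsa network ``n``. The multiindexed ``bus_sizes``
    # form described in the docstring above draws one pie per bus with a
    # segment per carrier (segment colors then come from n.carriers['color']),
    # while a float-valued ``line_colors`` together with ``line_cmap`` colors
    # the lines by their loading. The scaling factor is arbitrary.
    bus_sizes = n.generators.groupby(['bus', 'carrier']).p_nom.sum() / 1e4
    line_loading = n.lines_t.p0.abs().max() / n.lines.s_nom
    return plot(n, bus_sizes=bus_sizes, line_colors=line_loading,
                line_cmap=plt.cm.viridis, title='Capacity and line loading')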
def as_branch_series(ser, arg, c, n):
ser = pd.Series(ser, index=n.df(c).index)
assert not ser.isnull().any(), (f'{c}_{arg}s does not specify all '
f'entries. Missing values for {c}: {list(ser[ser.isnull()].index)}')
return ser
def get_projection_from_crs(crs):
if crs == 4326:
# if data is in latlon system, return default map with latlon system
return ccrs.PlateCarree()
try:
return ccrs.epsg(crs)
except requests.RequestException:
logger.warning("A connection to http://epsg.io/ is "
"required for a projected coordinate reference system. "
"Falling back to latlong.")
except ValueError:
logger.warning("'{crs}' does not define a projected coordinate system. "
"Falling back to latlong.".format(crs=crs))
return ccrs.PlateCarree()
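# For example, get_projection_from_crs(4326) simply returns
# ccrs.PlateCarree(), while any other EPSG code is looked up through
# ccrs.epsg(), which needs a connection to epsg.io and otherwise falls back
# to lat-lon as warned above.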
def compute_bbox_with_margins(margin, x, y):
'Helper function to compute bounding box for the plot'
# set margins
pos = np.asarray((x, y))
minxy, maxxy = pos.min(axis=1), pos.max(axis=1)
xy1 = minxy - margin*(maxxy - minxy)
xy2 = maxxy + margin*(maxxy - minxy)
return tuple(xy1), tuple(xy2)
def projected_area_factor(ax, original_crs=4326):
"""
Helper function to get the area scale of the current projection in
reference to the default projection. The default 'original crs' is assumed
to be 4326, which translates to the cartopy default cartopy.crs.PlateCarree()
"""
if not hasattr(ax, 'projection'):
return 1
if isinstance(ax.projection, ccrs.PlateCarree):
return 1
x1, x2, y1, y2 = ax.get_extent()
pbounds = \
get_projection_from_crs(original_crs).transform_points(ax.projection,
np.array([x1, x2]), np.array([y1, y2]))
return np.sqrt(abs((x2 - x1) * (y2 - y1))
/abs((pbounds[0] - pbounds[1])[:2].prod()))
def draw_map_cartopy(n, x, y, ax, geomap=True, color_geomap=None):
resolution = '50m' if isinstance(geomap, bool) else geomap
assert resolution in ['10m', '50m', '110m'], (
"Resolution has to be one of '10m', '50m', '110m'")
axis_transformation = get_projection_from_crs(n.srid)
if color_geomap is None:
color_geomap = {'ocean': 'w', 'land': 'w'}
elif color_geomap and not isinstance(color_geomap, dict):
color_geomap = {'ocean': 'lightblue', 'land': 'whitesmoke'}
ax.add_feature(cartopy.feature.LAND.with_scale(resolution),
facecolor=color_geomap['land'])
ax.add_feature(cartopy.feature.OCEAN.with_scale(resolution),
facecolor=color_geomap['ocean'])
ax.coastlines(linewidth=0.4, zorder=2, resolution=resolution)
border = cartopy.feature.BORDERS.with_scale(resolution)
ax.add_feature(border, linewidth=0.3)
return axis_transformation
def _flow_ds_from_arg(flow, n, branch_components):
if isinstance(flow, pd.Series):
if not isinstance(flow.index, pd.MultiIndex):
raise ValueError("Argument 'flow' is a pandas.Series without "
"a MultiIndex. Please provide a multiindexed series, with "
"the first level being a subset of 'branch_components'.")
return flow
if flow in n.snapshots:
return (pd.concat([n.pnl(c).p0.loc[flow]
for c in branch_components],
keys=branch_components, sort=True))
elif isinstance(flow, str) or callable(flow):
return (pd.concat([n.pnl(c).p0 for c in branch_components],
axis=1, keys=branch_components, sort=True)
.agg(flow, axis=0))
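# For reference, a minimal hand-built ``flow`` argument accepted by ``plot``
# is a Series whose first index level is the branch component and whose
# second level holds the branch names (the names below are purely
# illustrative and would have to exist in the network):
def _example_flow_series():
    return pd.Series([100.0, -50.0],
                     index=pd.MultiIndex.from_tuples([('Line', '1'),
                                                      ('Link', 'a')]))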
def directed_flow(coords, flow, color, area_factor=1, cmap=None):
"""
Helper function to generate arrows from flow data.
"""
    # this function is used for displaying arrows representing the network flow
data = pd.DataFrame(
{'arrowsize': flow.abs().pipe(np.sqrt).clip(lower=1e-8),
'direction': np.sign(flow),
'linelength': (np.sqrt((coords.x1 - coords.x2)**2. +
(coords.y1 - coords.y2)**2))})
data = data.join(coords)
if area_factor:
        data['arrowsize'] = data['arrowsize'].mul(area_factor)
data['arrowtolarge'] = (1.5 * data.arrowsize > data.linelength)
    # swap coords for negative directions
data.loc[data.direction == -1., ['x1', 'x2', 'y1', 'y2']] = \
data.loc[data.direction == -1., ['x2', 'x1', 'y2', 'y1']].values
if ((data.linelength > 0.) & (~data.arrowtolarge)).any():
data['arrows'] = (
data[(data.linelength > 0.) & (~data.arrowtolarge)]
.apply(lambda ds:
FancyArrow(ds.x1, ds.y1,
0.6*(ds.x2 - ds.x1) - ds.arrowsize
* 0.75 * (ds.x2 - ds.x1) / ds.linelength,
0.6 * (ds.y2 - ds.y1) - ds.arrowsize
* 0.75 * (ds.y2 - ds.y1)/ds.linelength,
head_width=ds.arrowsize), axis=1))
data.loc[(data.linelength > 0.) & (data.arrowtolarge), 'arrows'] = \
(data[(data.linelength > 0.) & (data.arrowtolarge)]
.apply(lambda ds:
FancyArrow(ds.x1, ds.y1,
0.001*(ds.x2 - ds.x1),
0.001*(ds.y2 - ds.y1),
head_width=ds.arrowsize), axis=1))
data = data.dropna(subset=['arrows'])
arrowcol = PatchCollection(data.arrows,
color=color,
edgecolors='k',
linewidths=0.,
zorder=4, alpha=1)
return arrowcol
def autogenerate_coordinates(n, assign=False, layouter=None):
"""
Automatically generate bus coordinates for the network graph
according to a layouting function from `networkx <https://networkx.github.io/>`_.
Parameters
----------
n : pypsa.Network
assign : bool, default False
Assign generated coordinates to the network bus coordinates
at ``n.buses[['x','y']]``.
layouter : networkx.drawing.layout function, default None
Layouting function from `networkx <https://networkx.github.io/>`_. See
`list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_
of available options. By default coordinates are determined for a
`planar layout <https://networkx.github.io/documentation/stable/reference/generated/networkx.drawing.layout.planar_layout.html#networkx.drawing.layout.planar_layout>`_
if the network graph is planar, otherwise for a
`Kamada-Kawai layout <https://networkx.github.io/documentation/stable/reference/generated/networkx.drawing.layout.kamada_kawai_layout.html#networkx.drawing.layout.kamada_kawai_layout>`_.
Returns
-------
coordinates : pd.DataFrame
DataFrame containing the generated coordinates with
buses as index and ['x', 'y'] as columns.
Examples
--------
>>> autogenerate_coordinates(network)
    >>> autogenerate_coordinates(network, assign=True, layouter=nx.circular_layout)
"""
G = n.graph()
if layouter is None:
is_planar = nx.check_planarity(G)[0]
if is_planar:
layouter = nx.planar_layout
else:
layouter = nx.kamada_kawai_layout
coordinates = pd.DataFrame(layouter(G)).T.rename({0: 'x', 1: 'y'}, axis=1)
if assign:
n.buses[['x', 'y']] = coordinates
return coordinates
def _get_coordinates(n, layouter=None):
if layouter is not None or n.buses[['x', 'y']].isin([np.nan, 0]).all().all():
coordinates = autogenerate_coordinates(n, layouter=layouter)
return coordinates["x"], coordinates["y"]
else:
return n.buses["x"], n.buses["y"]
_token_required_mb_styles = ['basic', 'streets', 'outdoors', 'light', 'dark',
'satellite', 'satellite-streets']
_open__mb_styles = ['open-street-map', 'white-bg', 'carto-positron',
'carto-darkmatter', 'stamen-terrain', 'stamen-toner',
'stamen-watercolor']
#This function was borne out of a breakout group at the October 2017
#Munich Open Energy Modelling Initiative Workshop to hack together a
#working example of plotly for networks, see:
#https://forum.openmod-initiative.org/t/breakout-group-on-visualising-networks-with-plotly/384/7
#We thank Bryn Pickering for holding the tutorial on plotly which
#inspired the breakout group and for contributing ideas to the iplot
#function below.
def iplot(n, fig=None, bus_colors='cadetblue', bus_alpha=1, bus_sizes=10,
bus_cmap=None, bus_colorbar=None, bus_text=None,
line_colors='rosybrown', link_colors='darkseagreen',
transformer_colors='orange', line_widths=3, link_widths=3,
transformer_widths=3, line_text=None, link_text=None,
transformer_text=None, layouter=None, title="", size=None,
branch_components=None, iplot=True, jitter=None, mapbox=False,
          mapbox_style='open-street-map', mapbox_token="", mapbox_parameters=None):
"""
Plot the network buses and lines interactively using plotly.
Parameters
----------
fig : dict, default None
If not None, figure is built upon this fig.
bus_colors : dict/pandas.Series
Colors for the buses, defaults to "cadetblue". If bus_sizes is a
pandas.Series with a Multiindex, bus_colors defaults to the
n.carriers['color'] column.
bus_alpha : float
Adds alpha channel to buses, defaults to 1.
bus_sizes : float/pandas.Series
Sizes of bus points, defaults to 10.
bus_cmap : plt.cm.ColorMap/str
If bus_colors are floats, this color map will assign the colors
bus_colorbar : dict
Plotly colorbar, e.g. {'title' : 'my colorbar'}
bus_text : pandas.Series
Text for each bus, defaults to bus names
line_colors : str/pandas.Series
Colors for the lines, defaults to 'rosybrown'.
link_colors : str/pandas.Series
Colors for the links, defaults to 'darkseagreen'.
    transformer_colors : str/pandas.Series
        Colors for the transformers, defaults to 'orange'.
    line_widths : dict/pandas.Series
        Widths of lines, defaults to 3
    link_widths : dict/pandas.Series
        Widths of links, defaults to 3
    transformer_widths : dict/pandas.Series
        Widths of transformers, defaults to 3
line_text : pandas.Series
Text for lines, defaults to line names.
link_text : pandas.Series
Text for links, defaults to link names.
    transformer_text : pandas.Series
Text for transformers, defaults to transformer names.
layouter : networkx.drawing.layout function, default None
Layouting function from `networkx <https://networkx.github.io/>`_ which
overrules coordinates given in ``n.buses[['x','y']]``. See
`list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_
of available options.
title : string
Graph title
size : None|tuple
        Tuple specifying width and height of figure; e.g. (width, height).
branch_components : list of str
Branch components to be plotted, defaults to Line and Link.
iplot : bool, default True
Automatically do an interactive plot of the figure.
jitter : None|float
Amount of random noise to add to bus positions to distinguish
overlapping buses
mapbox : bool, default False
Switch to use Mapbox.
mapbox_style : str, default 'open-street-map'
Define the mapbox layout style of the interactive plot. If this is set
to a mapbox layout, the argument ``mapbox_token`` must be a valid Mapbox
API access token.
Valid open layouts are:
open-street-map, white-bg, carto-positron, carto-darkmatter,
stamen-terrain, stamen-toner, stamen-watercolor
Valid mapbox layouts are:
basic, streets, outdoors, light, dark, satellite, satellite-streets
mapbox_token : string
Mapbox API access token. Obtain from https://www.mapbox.com.
Can also be included in mapbox_parameters as `accesstoken=mapbox_token`.
mapbox_parameters : dict
Configuration parameters of the Mapbox layout.
E.g. {"bearing": 5, "pitch": 10, "zoom": 1, "style": 'dark'}.
Returns
-------
fig: dictionary for plotly figure
"""
    if mapbox_parameters is None:
        mapbox_parameters = {}
    if fig is None:
        fig = dict(data=[], layout={})
if bus_text is None:
bus_text = 'Bus ' + n.buses.index
x, y = _get_coordinates(n, layouter=layouter)
if jitter is not None:
x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x))
y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y))
bus_trace = dict(x=x, y=y,
text=bus_text,
type="scatter",
mode="markers",
hoverinfo="text",
opacity=bus_alpha,
marker=dict(color=bus_colors,
size=bus_sizes))
if bus_cmap is not None:
bus_trace['marker']['colorscale'] = bus_cmap
if bus_colorbar is not None:
bus_trace['marker']['colorbar'] = bus_colorbar
# Plot branches:
if isinstance(line_widths, pd.Series):
if isinstance(line_widths.index, pd.MultiIndex):
            raise TypeError("Index of argument 'line_widths' is a Multiindex, "
                            "this is not supported since pypsa v0.17. "
"Set differing widths with arguments 'line_widths', "
"'link_widths' and 'transformer_widths'.")
if isinstance(line_colors, pd.Series):
if isinstance(line_colors.index, pd.MultiIndex):
            raise TypeError("Index of argument 'line_colors' is a Multiindex, "
                            "this is not supported since pypsa v0.17. "
"Set differing colors with arguments 'line_colors', "
"'link_colors' and 'transformer_colors'.")
if branch_components is None:
branch_components = n.branch_components
branch_colors = {'Line': line_colors, 'Link': link_colors,
'Transformer': transformer_colors}
branch_widths = {'Line': line_widths, 'Link': link_widths,
'Transformer': transformer_widths}
branch_text = {'Line': line_text, 'Link': link_text,
'Transformer': transformer_text}
shapes = []
shape_traces = []
for c in n.iterate_components(branch_components):
b_widths = as_branch_series(branch_widths[c.name], 'width', c.name, n)
b_colors = as_branch_series(branch_colors[c.name], 'color', c.name, n)
b_text = branch_text[c.name]
if b_text is None:
b_text = c.name + ' ' + c.df.index
x0 = c.df.bus0.map(x)
x1 = c.df.bus1.map(x)
y0 = c.df.bus0.map(y)
y1 = c.df.bus1.map(y)
for b in c.df.index:
shapes.append(dict(type='line', opacity=0.8,
x0=x0[b], y0=y0[b], x1=x1[b], y1=y1[b],
line=dict(color=b_colors[b], width=b_widths[b])))
shape_traces.append(dict(x=0.5*(x0+x1), y=0.5*(y0+y1),
text=b_text, type="scatter",
mode="markers", hoverinfo="text",
marker=dict(opacity=0.)))
if mapbox:
shape_traces_latlon = []
for st in shape_traces:
st['lon'] = st.pop('x')
st['lat'] = st.pop('y')
shape_traces_latlon.append(go.Scattermapbox(st))
shape_traces = shape_traces_latlon
shapes_mapbox = []
for s in shapes:
s['lon'] = [s.pop('x0'), s.pop('x1')]
s['lat'] = [s.pop('y0'), s.pop('y1')]
shapes_mapbox.append(go.Scattermapbox(s, mode='lines'))
shapes = shapes_mapbox
bus_trace['lon'] = bus_trace.pop('x')
bus_trace['lat'] = bus_trace.pop('y')
bus_trace = go.Scattermapbox(bus_trace)
fig['data'].extend(shapes + shape_traces + [bus_trace])
else:
fig['data'].extend([bus_trace]+shape_traces)
fig['layout'].update(dict(title=title,
hovermode='closest',
showlegend=False))
if size is not None:
assert len(size) == 2, "Parameter size must specify a tuple (width, height)."
fig['layout'].update(dict(width=size[0],
height=size[1]))
if mapbox:
if mapbox_token != "":
mapbox_parameters['accesstoken'] = mapbox_token
mapbox_parameters.setdefault('style', mapbox_style)
if mapbox_parameters['style'] in _token_required_mb_styles:
            assert 'accesstoken' in mapbox_parameters.keys(), ("Using Mapbox "
                "layout styles requires a valid access token from https://www.mapbox.com/, "
                f"styles which do not require a token are:\n{', '.join(_open__mb_styles)}.")
if 'center' not in mapbox_parameters.keys():
            lon = (n.buses.x.min() + n.buses.x.max()) / 2
            lat = (n.buses.y.min() + n.buses.y.max()) / 2
mapbox_parameters['center'] = dict(lat=lat, lon=lon)
if 'zoom' not in mapbox_parameters.keys():
mapbox_parameters['zoom'] = 2
fig['layout']['mapbox'] = mapbox_parameters
else:
fig['layout']['shapes'] = shapes
if iplot:
if not pltly_present:
logger.warning("Plotly is not present, so interactive plotting won't work.")
else:
pltly.iplot(fig)
return fig
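def _example_iplot_usage(n, mapbox_token=""):
    # Illustrative sketch only, not part of the public API: an interactive
    # plot of network ``n`` on the default open-street-map base layer, or on
    # the Mapbox 'dark' style if an access token is supplied. The zoom value
    # is arbitrary.
    if mapbox_token:
        return iplot(n, mapbox=True, mapbox_token=mapbox_token,
                     mapbox_parameters={"style": "dark", "zoom": 3})
    return iplot(n, mapbox=True, mapbox_style='open-street-map')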
| gpl-3.0 |
lhilt/scipy | scipy/optimize/nonlin.py | 4 | 48289 | r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as SciPy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
        If True, a `NoConvergence` exception is raised if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
https://archive.siam.org/books/kelley/fr16/
"""
# Can't use default parameters because it's being explicitly passed as None
# from the calling function, so we need to set it here.
tol_norm = maxnorm if tol_norm is None else tol_norm
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
    eta_threshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
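        # (eta is an Eisenstat-Walker style forcing term: the inner solve
        #  tolerance tightens as |F| decreases, with the safeguard below
        #  keeping it from collapsing too quickly.)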
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_threshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
n, tol_norm(Fx), s))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with SciPy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
    The optional methods are useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
    a full matrix representation will be used from then on.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
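def _demo_low_rank_matrix():
    # Illustrative sketch, not used by the solvers: check that matvec/solve of
    # a small LowRankMatrix agree with its dense representation.
    rng = np.random.RandomState(0)
    m = LowRankMatrix(alpha=2.0, n=5, dtype=float)
    for _ in range(3):
        m.append(rng.rand(5), rng.rand(5))
    dense = np.array(m)
    v = rng.rand(5)
    assert np.allclose(m.matvec(v), dense.dot(v))
    assert np.allclose(dense.dot(m.solve(v)), v)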
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='broyden1'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
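def _demo_broyden_first():
    # Illustrative sketch, not part of the public API: solve the small system
    # from the module docstring with an explicit BroydenFirst Jacobian, which
    # is what the ``broyden1`` convenience wrapper uses under the hood.
    def F(x):
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]
    return nonlin_solve(F, [1, 1, 1, 1], jacobian=BroydenFirst(), f_tol=1e-14)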
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='broyden2'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by looking for the 'best' solution in the space
    spanned by the last `M` vectors. As a result, only an MxM matrix
    inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='anderson'`` in particular.
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='diagbroyden'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='linearmixing'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(np.full(self.shape[0], -1/self.alpha))
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='excitingmixing'`` in particular.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='krylov'`` in particular.
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
:doi:`10.1016/j.jcp.2003.08.010`
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
:doi:`10.1137/S0895479803422014`
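
    Examples
    --------
    A small illustrative sketch (not part of the original documentation);
    the residual function ``fun`` is an invented two-dimensional example.

    >>> from scipy.optimize import root
    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]
    >>> sol = root(fun, [0.0, 0.0], method='krylov')  # doctest: +SKIP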
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
self.method_kw.setdefault('atol', 0)
elif self.method is scipy.sparse.linalg.gcrotmk:
self.method_kw.setdefault('atol', 0)
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
self.method_kw.setdefault('prepend_outer_v', True)
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
self.method_kw.setdefault('atol', 0)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
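        # Approximate the Jacobian-vector product by a forward difference:
        # J v ~= (f(x0 + sc*v) - f(x0)) / sc  with step sc = omega / ||v||,
        # where omega is set in _update_diff_step().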
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
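        # Use the inner tolerance fixed by the user (inner_tol), if any;
        # otherwise forward the tolerance requested by the nonlinear solver.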
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
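        # If no relative step was given, default to the square root of the
        # machine epsilon of x's dtype.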
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__`` and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
args, varargs, varkw, defaults = _getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
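# Public, module-level wrappers around nonlin_solve, one for each Jacobian
# approximation class defined above.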
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| bsd-3-clause |
darcamo/pyphysim | tests/simulations_package_test.py | 1 | 131568 | #!/usr/bin/env python
# pylint: disable=E1101,E1103,W0403
"""
Tests for the modules in the simulations package.
Each module has doctests for its functions and all we need to do is run all
of them.
"""
import doctest
import glob
import json
import os
import sys
import unittest
from copy import copy
from itertools import repeat
from pathlib import Path
import numpy as np
from pyphysim import progressbar
from pyphysim.simulations import (configobjvalidation, parameters, results,
runner, simulationhelpers)
# noinspection PyProtectedMember
from pyphysim.simulations.configobjvalidation import (
_parse_float_range_expr, integer_scalar_or_integer_numpy_array_check,
real_scalar_or_real_numpy_array_check)
from pyphysim.simulations.parameters import (SimulationParameters,
combine_simulation_parameters)
from pyphysim.simulations.results import (Result, SimulationResults,
combine_simulation_results)
from pyphysim.simulations.runner import (SimulationRunner, SkipThisOne,
get_common_parser)
from pyphysim.util.misc import calc_confidence_interval
try:
# noinspection PyUnresolvedReferences
from ipyparallel import CompositeError
_IPYTHON_AVAILABLE = True
except ImportError: # pragma: no cover
_IPYTHON_AVAILABLE = False
try: # pragma: nocover
# noinspection PyUnresolvedReferences
from pandas import DataFrame
assert DataFrame # Avoid unused import warning for DataFrame
_PANDAS_AVAILABLE = True
except ImportError: # pragma: nocover
_PANDAS_AVAILABLE = False
def delete_file_if_possible(filename):
"""
Try to delete the file with name `filename`.
Parameters
----------
filename : str
The name of the file to be removed.
"""
try:
os.remove(filename)
except OSError: # pragma: no cover
pass
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx Doctests xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# noinspection PyMethodMayBeStatic
class SimulationsDoctestsTestCase(unittest.TestCase):
"""
Test case that run all the doctests in the modules of the simulations
package.
"""
def test_configobjvalidation(self) -> None:
"""Run configobjvalidation doctests"""
doctest.testmod(configobjvalidation)
def test_parameters(self) -> None:
"""Run parameters doctests"""
doctest.testmod(parameters)
def test_progressbar(self) -> None:
"""Run progressbar doctests"""
doctest.testmod(progressbar)
def test_results(self) -> None:
"""Run results doctests"""
doctest.testmod(results)
def test_simulationhelpers(self) -> None:
"""Run simulationhelpers doctests"""
doctest.testmod(simulationhelpers)
def test_runner(self) -> None:
"""Run runner doctests"""
doctest.testmod(runner)
class SimulationHelpersTestCase(unittest.TestCase):
def test_get_common_parser(self) -> None:
p = get_common_parser()
p2 = get_common_parser()
self.assertTrue(p is p2)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx configobjvalidation Module xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# noinspection PyUnboundLocalVariable
class ConfigobjvalidationModuleFunctionsTestCase(unittest.TestCase):
"""
Unit-tests for the module functions in the in the configobjvalidation
module.
"""
def setUp(self) -> None:
"""Called before each test."""
pass
def test_parse_range_expr(self) -> None:
try:
# noinspection PyUnresolvedReferences
import validate
except ImportError: # pragma: no cover
self.skipTest("test_parse_range_expr - "
"The validate module is not installed")
expr = "10:15"
expected_parsed_expr = np.r_[10:15]
parsed_expr = _parse_float_range_expr(expr)
np.testing.assert_array_almost_equal(expected_parsed_expr, parsed_expr)
expr = "10:2:15"
expected_parsed_expr = np.r_[10:15:2]
parsed_expr = _parse_float_range_expr(expr)
np.testing.assert_array_almost_equal(expected_parsed_expr, parsed_expr)
expr = "-3.4:0.5:5"
expected_parsed_expr = np.r_[-3.4:5.0:0.5]
parsed_expr = _parse_float_range_expr(expr)
np.testing.assert_array_almost_equal(expected_parsed_expr, parsed_expr)
# xxxxx Test invalid values xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
expr = "a string"
with self.assertRaises(validate.VdtTypeError):
_parse_float_range_expr(expr)
expr = "10,5"
with self.assertRaises(validate.VdtTypeError):
_parse_float_range_expr(expr)
expr = "10.5."
with self.assertRaises(validate.VdtTypeError):
_parse_float_range_expr(expr)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Note: Since the "real_scalar_or_real_numpy_array_check" function will
# call the "real_numpy_array_check" function we only need a test case
# for the "real_scalar_or_real_numpy_array_check" function.
def test_real_scalar_or_real_numpy_array_check(self) -> None:
try:
# noinspection PyUnresolvedReferences
import validate
except ImportError: # pragma: no cover
self.skipTest("test_real_scalar_or_real_numpy_array_check - "
"The validate module is not installed")
# xxxxxxxxxx Try to parse float scalar values xxxxxxxxxxxxxxxxxxxxx
value = "4.6"
expected_parsed_value = 4.6
self.assertAlmostEqual(real_scalar_or_real_numpy_array_check(value),
expected_parsed_value)
self.assertTrue(
isinstance(real_scalar_or_real_numpy_array_check(value), float))
value = "76.21"
expected_parsed_value = 76.21
self.assertAlmostEqual(real_scalar_or_real_numpy_array_check(value),
expected_parsed_value)
self.assertTrue(
isinstance(real_scalar_or_real_numpy_array_check(value), float))
# Test validation against the minimum and maximum allowed value
value = "5.7"
with self.assertRaises(validate.VdtValueTooSmallError):
real_scalar_or_real_numpy_array_check(value, min=10.0)
with self.assertRaises(validate.VdtValueTooBigError):
real_scalar_or_real_numpy_array_check(value, max=5.0)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Now we will parse range expressions xxxxxxxxxxxxxxxxxx
# Test when the input is a list of strings (with the numbers)
list_of_strings = ['0', '6', '17']
parsed_array = real_scalar_or_real_numpy_array_check(list_of_strings,
min=0,
max=30)
expected_parsed_array = np.array([0., 6., 17.])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('float'))
np.testing.assert_array_almost_equal(parsed_array,
expected_parsed_array)
# Test when the input is a string representation of a list with
# numbers and range expressions.
array_string = "[0 5 10:15]"
parsed_array = real_scalar_or_real_numpy_array_check(array_string,
min=0,
max=30)
expected_parsed_array = np.array([0., 5., 10., 11., 12., 13., 14.])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('float'))
np.testing.assert_array_almost_equal(parsed_array,
expected_parsed_array)
array_string = "10:15"
parsed_array = real_scalar_or_real_numpy_array_check(array_string,
min=0,
max=30)
expected_parsed_array = np.array([10., 11., 12., 13., 14.])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('float'))
np.testing.assert_array_almost_equal(parsed_array,
expected_parsed_array)
array_string = "[10:15]"
parsed_array = real_scalar_or_real_numpy_array_check(array_string,
min=0,
max=30)
expected_parsed_array = np.array([10., 11., 12., 13., 14.])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('float'))
np.testing.assert_array_almost_equal(parsed_array,
expected_parsed_array)
array_string = "[0,5,10:15,20]"
parsed_array = real_scalar_or_real_numpy_array_check(array_string,
min=0,
max=30)
expected_parsed_array = np.array(
[0., 5., 10., 11., 12., 13., 14., 20.])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('float'))
np.testing.assert_array_almost_equal(parsed_array,
expected_parsed_array)
# xxxxx Test validation against the minimum allowed value xxxxxxxxx
array_string = "[0,5,10:15,20]"
with self.assertRaises(validate.VdtValueTooSmallError):
real_scalar_or_real_numpy_array_check(array_string, min=4, max=30)
# xxxxx Test validation against the minimum allowed value xxxxxxxxx
with self.assertRaises(validate.VdtValueTooBigError):
real_scalar_or_real_numpy_array_check(array_string, min=0, max=15)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Note: Since the "integer_scalar_or_integer_numpy_array_check"
# function will call the "integer_numpy_array_check" function we only
# need a test case for the
# "integer_scalar_or_integer_numpy_array_check" function.
def test_integer_scalar_or_integer_numpy_array_check(self) -> None:
try:
# noinspection PyUnresolvedReferences
import validate
except ImportError: # pragma: no cover
self.skipTest("test_integer_scalar_or_integer_numpy_array_check - "
"The validate module is not installed")
# xxxxxxxxxx Try to parse float scalar values xxxxxxxxxxxxxxxxxxxxx
value = "4"
expected_parsed_value = 4
self.assertAlmostEqual(
integer_scalar_or_integer_numpy_array_check(value),
expected_parsed_value)
self.assertTrue(
isinstance(integer_scalar_or_integer_numpy_array_check(value),
int))
value = "76"
expected_parsed_value = 76
self.assertAlmostEqual(
integer_scalar_or_integer_numpy_array_check(value),
expected_parsed_value)
self.assertTrue(
isinstance(integer_scalar_or_integer_numpy_array_check(value),
int))
# Test validation against the minimum and maximum allowed value
value = "6"
with self.assertRaises(validate.VdtValueTooSmallError):
integer_scalar_or_integer_numpy_array_check(value, min=10)
with self.assertRaises(validate.VdtValueTooBigError):
integer_scalar_or_integer_numpy_array_check(value, max=5)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Now we will parse range expressions xxxxxxxxxxxxxxxxxx
# test when the input is a list of strings
list_of_strings = ['0', '6', '17']
parsed_array = integer_scalar_or_integer_numpy_array_check(
list_of_strings, min=0, max=30)
expected_parsed_array = np.array([0, 6, 17])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('int'))
np.testing.assert_array_equal(parsed_array, expected_parsed_array)
array_string = "[0 5 10:15]"
parsed_array = integer_scalar_or_integer_numpy_array_check(
array_string, min=0, max=30)
expected_parsed_array = np.array([0, 5, 10, 11, 12, 13, 14])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('int'))
np.testing.assert_array_equal(parsed_array, expected_parsed_array)
array_string = "10:15"
parsed_array = integer_scalar_or_integer_numpy_array_check(
array_string, min=0, max=30)
expected_parsed_array = np.array([10, 11, 12, 13, 14])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('int'))
np.testing.assert_array_equal(parsed_array, expected_parsed_array)
array_string = "[10:15]"
parsed_array = integer_scalar_or_integer_numpy_array_check(
array_string, min=0, max=30)
expected_parsed_array = np.array([10, 11, 12, 13, 14])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('int'))
np.testing.assert_array_equal(parsed_array, expected_parsed_array)
array_string = "[0,5,10:15,20]"
parsed_array = integer_scalar_or_integer_numpy_array_check(
array_string, min=0, max=30)
expected_parsed_array = np.array([0, 5, 10, 11, 12, 13, 14, 20])
self.assertTrue(np.array(parsed_array).dtype is np.dtype('int'))
np.testing.assert_array_equal(parsed_array, expected_parsed_array)
# xxxxx Test validation against the minimum allowed value xxxxxxxxx
array_string = "[0,5,10:15,20]"
with self.assertRaises(validate.VdtValueTooSmallError):
integer_scalar_or_integer_numpy_array_check(array_string,
min=4,
max=30)
with self.assertRaises(validate.VdtValueTooBigError):
integer_scalar_or_integer_numpy_array_check(array_string,
min=0,
max=15)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx Parameters Module xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class ParametersModuleFunctionsTestCase(unittest.TestCase):
"""
Unit-tests for the functions in the parameters module.
"""
def setUp(self) -> None:
"""Called before each test."""
pass
def test_combine_simulation_parameters(self) -> None:
sim_params1 = SimulationParameters.create({
'first':
10,
'second':
20,
'third':
np.array([1, 3, 2, 5]),
'fourth': ['A', 'B']
})
sim_params1.set_unpack_parameter('third')
sim_params1.set_unpack_parameter('fourth')
sim_params2 = SimulationParameters.create({
'first':
10,
'second':
20,
'third':
np.array([-1, 1, 3, 8])
})
sim_params2.set_unpack_parameter('third')
sim_params3 = SimulationParameters.create({
'first':
10,
'third':
np.array([-1, 1, 3, 8]),
'fourth': ['B', 'C']
})
sim_params3.set_unpack_parameter('third')
sim_params3.set_unpack_parameter('fourth')
sim_params4 = SimulationParameters.create({
'first':
10,
'second':
30,
'third':
np.array([-1, 1, 3, 8]),
'fourth': ['B', 'C']
})
sim_params4.set_unpack_parameter('third')
sim_params4.set_unpack_parameter('fourth')
sim_params5 = SimulationParameters.create({
'first':
10,
'second':
20,
'third':
np.array([-1, 1, 3, 8]),
'fourth': ['B', 'C']
})
sim_params5.set_unpack_parameter('fourth')
sim_params6 = SimulationParameters.create({
'first':
10,
'second':
20,
'third':
np.array([-1, 1, 3, 8]),
'fourth': ['B', 'C']
})
sim_params6.set_unpack_parameter('third')
sim_params6.set_unpack_parameter('fourth')
# xxxxxxxxxx Test invalid cases xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
with self.assertRaises(RuntimeError):
# sim_params2 does not have the same parameters of sim_params1
combine_simulation_parameters(sim_params1, sim_params2)
with self.assertRaises(RuntimeError):
# sim_params3 does not have the same parameters of sim_params1
combine_simulation_parameters(sim_params1, sim_params3)
with self.assertRaises(RuntimeError):
# sim_params4 has the same parameters of sim_params1, but the
# value of one of the fixed parameters is different
combine_simulation_parameters(sim_params1, sim_params4)
with self.assertRaises(RuntimeError):
# sim_params4 has the same parameters of sim_params1, but the
# parameters set to be unpacked are different
combine_simulation_parameters(sim_params1, sim_params5)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test the valid case xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
union = combine_simulation_parameters(sim_params1, sim_params6)
self.assertEqual(union['first'], sim_params1['first'])
self.assertEqual(union['second'], sim_params1['second'])
np.testing.assert_array_almost_equal(union['third'],
np.array([-1, 1, 2, 3, 5, 8]))
np.testing.assert_array_equal(union['fourth'],
np.array(['A', 'B', 'C']))
self.assertEqual(set(union.unpacked_parameters), {'third', 'fourth'})
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class SimulationParametersTestCase(unittest.TestCase):
"""
Unit-tests for the SimulationParameters class in the parameters module.
"""
def setUp(self) -> None:
params_dict = {'first': 10, 'second': 20}
self.sim_params = SimulationParameters.create(params_dict)
def test_create(self) -> None:
# The create method was already called in the setUp.
self.assertEqual(len(self.sim_params), 2)
self.assertEqual(self.sim_params['first'], 10)
self.assertEqual(self.sim_params['second'], 20)
def test_add(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.assertEqual(len(self.sim_params), 3)
np.testing.assert_array_equal(self.sim_params['third'],
np.array([1, 3, 2, 5]))
# We can also add a parameter with __setitem__
self.sim_params["fourth"] = 17.5
self.assertEqual(len(self.sim_params), 4)
np.testing.assert_array_equal(self.sim_params['fourth'], 17.5)
def test_unpacking_parameters(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.assertEqual(self.sim_params.get_num_unpacked_variations(), 1)
# Test the correct static parameters (should be all parameters for
# now)
self.assertEqual(set(self.sim_params.fixed_parameters),
{'first', 'second', 'third', 'fourth'})
# Let's unpack the parameters 'third' and 'fourth'
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
        # One unpacked param with four values and another with two will give
# us 4x2=8 unpacked variations. Let's test the
# get_num_unpacked_variations() method.
self.assertEqual(self.sim_params.get_num_unpacked_variations(), 8)
# We make the unpacked_parameters and the expected value sets
# because the order does not matter
self.assertEqual(set(self.sim_params.unpacked_parameters),
{'third', 'fourth'})
# We may have 8 variations, but there are still only 4 parameters
self.assertEqual(len(self.sim_params), 4)
# Test the correct static parameters
self.assertEqual(set(self.sim_params.fixed_parameters),
{'first', 'second'})
# Test if an exception is raised if we try to set a non iterable
# parameter to be unpacked.
self.sim_params.add('fifth', 10)
with self.assertRaises(ValueError):
self.sim_params.set_unpack_parameter('fifth')
# Test if an exception is thrown if we try to set a non existing
# parameter to be unset.
with self.assertRaises(ValueError):
self.sim_params.set_unpack_parameter('sixth')
# xxxxx THIS TEST WILL NOT BE PERFORMED IN PYTHON 3 xxxxxxxxxxxxxxx
if sys.version_info[0] < 3:
# Now that a few parameters were added and set to be unpacked,
# lets test the representation of the SimulationParameters
# object. Note that the parameters that are marked for
# unpacking have '*' appended to their name.
self.assertEqual(
self.sim_params.__repr__(),
"{'second': 20, 'fifth': 10, 'fourth*': ['A', 'B'], "
"'third*': [1 3 2 5], 'first': 10}")
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Test if we can unset a parameter that was previously set to be
# unpacked.
self.sim_params.set_unpack_parameter('fourth', False)
self.assertEqual(set(self.sim_params.unpacked_parameters), {'third'})
def test_remove(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.sim_params.add('fifth', ['Z', 'W', 'Y'])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
# Remove parameters second and third
self.sim_params.remove('second')
# Note that this parameter was marked to be unpacked
self.sim_params.remove('third')
expected_parameters = {
'first': 10,
'fourth': ['A', 'B'],
'fifth': ['Z', 'W', 'Y']
}
expected_unpacked_parameters = {'fourth'}
self.assertEqual(self.sim_params.parameters, expected_parameters)
self.assertEqual(set(self.sim_params.unpacked_parameters),
expected_unpacked_parameters)
def test_equal_and_not_equal_operators(self) -> None:
other = SimulationParameters()
self.assertFalse(self.sim_params == other)
self.assertTrue(self.sim_params != other)
other.add('first', 10)
other.add('second', 20)
self.assertTrue(self.sim_params == other)
self.assertFalse(self.sim_params != other)
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.assertFalse(self.sim_params == other)
other.add('third', np.array([1, 3, 2, 5]))
self.assertTrue(self.sim_params == other)
self.sim_params.set_unpack_parameter('third')
self.assertFalse(self.sim_params == other)
other.set_unpack_parameter('third')
self.assertTrue(self.sim_params == other)
other.parameters['third'][2] = 10
self.assertFalse(self.sim_params == other)
self.sim_params.parameters['third'][2] = 10
self.assertTrue(self.sim_params == other)
        # The rep_max parameter is not considered when testing if two
# SimulationParameters objects are equal or not.
other.add('rep_max', 30)
self.sim_params.add('rep_max', 40)
self.assertTrue(self.sim_params == other)
# Test comparison with something of a different class
# noinspection PyTypeChecker
self.assertFalse(self.sim_params == 10.4)
# Test if two objects are different if the _unpack_index value is
# different
a = SimulationParameters()
a.add('first', [10, 20, 30])
a.add('second', [60, 20, 0])
a.set_unpack_parameter('first')
b = SimulationParameters()
b.add('first', [10, 20, 30])
b.add('second', [60, 20, 0])
b.set_unpack_parameter('first')
self.assertTrue(a == b)
a._unpack_index = 1
b._unpack_index = 3
self.assertFalse(a == b)
def test_repr(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.set_unpack_parameter("third")
self.assertEqual(repr(self.sim_params),
"{'first': 10, 'second': 20, 'third*': [1 3 2 5]}")
def test_get_unpacked_params_list(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
unpacked_param_list = self.sim_params.get_unpacked_params_list()
self.assertEqual(unpacked_param_list, [self.sim_params])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
params_dict = {'first': [], 'second': [], 'third': [], 'fourth': []}
unpacked_param_list = self.sim_params.get_unpacked_params_list()
for i in unpacked_param_list:
            # For the fixed parameters this appends the same value once per
            # variation; the repetitions are removed below via sets
params_dict['first'].append(i['first'])
params_dict['second'].append(i['second'])
params_dict['third'].append(i['third'])
params_dict['fourth'].append(i['fourth'])
# We change all values to sets to remove repeated values for
# testing purposes.
self.assertEqual(set(params_dict['first']), {self.sim_params['first']})
self.assertEqual(set(params_dict['second']),
{self.sim_params['second']})
self.assertEqual(set(params_dict['third']),
set(self.sim_params['third']))
self.assertEqual(set(params_dict['fourth']),
set(self.sim_params['fourth']))
# Test if the _unpack_index and the _original_sim_params member
# variables are correct for each unpacked variation
for i in range(self.sim_params.get_num_unpacked_variations()):
self.assertEqual(unpacked_param_list[i]._unpack_index, i)
self.assertTrue(
unpacked_param_list[i]._original_sim_params is self.sim_params)
def test_get_num_unpacked_variations(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.assertEqual(self.sim_params.get_num_unpacked_variations(), 1)
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
        # One unpacked param with four values and another with two will give
# us 4x2=8 unpacked variations. Let's test the
# get_num_unpacked_variations() method.
self.assertEqual(self.sim_params.get_num_unpacked_variations(), 8)
# Get the unpacked params list.
unpacked_params_list = self.sim_params.get_unpacked_params_list()
        # If we call the get_num_unpacked_variations method of an unpacked
        # SimulationParameters object, it should return the number of
        # unpacked variations of the PARENT object, instead of just
        # returning 1.
for u in unpacked_params_list:
self.assertEqual(self.sim_params.get_num_unpacked_variations(),
u.get_num_unpacked_variations())
def test_get_pack_indexes(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.sim_params.add('fifth', ['Z', 'X', 'W'])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
self.sim_params.set_unpack_parameter('fifth')
unpacked_list = self.sim_params.get_unpacked_params_list()
# The parameters 'third' and 'fourth' are marked to be unpacked,
# while the parameters 'first' and 'second' will always be the
# same. The combinations after unpacking are shown below (the order
# might be different depending on which version of the python
# interpreter was used)
#
# {'second': 20, 'fifth': Z, 'fourth': A, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': A, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': A, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': A, 'third': 5, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': B, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': B, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': B, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': Z, 'fourth': B, 'third': 5, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': A, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': A, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': A, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': A, 'third': 5, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': B, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': B, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': B, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': X, 'fourth': B, 'third': 5, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': A, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': A, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': A, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': A, 'third': 5, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': B, 'third': 1, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': B, 'third': 3, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': B, 'third': 2, 'first': 10}
# {'second': 20, 'fifth': W, 'fourth': B, 'third': 5, 'first': 10}
#
# Lets focus on the 'third' and 'fourth' parameters, since they are
# the only ones changing. Suppose we want to get the indexes
# corresponding to varying the 'fourth' parameters with the 'third'
# parameter equal to 2. We create a dictionary
fixed_third_2 = {'third': 2}
# Get the indexes where the third parameter has a value of 2.
fixed_third_2_indexes = self.sim_params.get_pack_indexes(fixed_third_2)
self.assertEqual(len(fixed_third_2_indexes), 6)
# Now we test if for these indexes the value of the third parameter
# is really 2
for i in fixed_third_2_indexes:
self.assertEqual(unpacked_list[i]['third'], 2)
fixed_third_5 = {'third': 5}
fixed_third_5_indexes = self.sim_params.get_pack_indexes(fixed_third_5)
self.assertEqual(len(fixed_third_5_indexes), 6)
for i in fixed_third_5_indexes:
self.assertEqual(unpacked_list[i]['third'], 5)
# now lets fix the 'fourth' parameter and let the 'third' vary.
fixed_fourth_A = {'fourth': 'A'}
fixed_fourth_A_indexes = self.sim_params.get_pack_indexes(
fixed_fourth_A)
self.assertEqual(len(fixed_fourth_A_indexes), 12)
for i in fixed_fourth_A_indexes:
self.assertEqual(unpacked_list[i]['fourth'], 'A')
fixed_fourth_B = {'fourth': 'B'}
fixed_fourth_B_indexes = self.sim_params.get_pack_indexes(
fixed_fourth_B)
self.assertEqual(len(fixed_fourth_B_indexes), 12)
for i in fixed_fourth_B_indexes:
self.assertEqual(unpacked_list[i]['fourth'], 'B')
# Lets try to fix some invalid value to see if an exception is
# raised
fixed_fourth_invalid = {'fourth': 'C'}
with self.assertRaises(ValueError):
# This should raise a ValueError, since the parameter 'fourth'
# has no value 'C'
self.sim_params.get_pack_indexes(fixed_fourth_invalid)
# Now lets fix the third, fourth and fifth parameters. This should
# get me a single index.
index1 = self.sim_params.get_pack_indexes({
'third': 5,
'fourth': 'B',
'fifth': 'Z'
})[0]
# Now we use the index to get an element in the unpacked_list and
# check if the values are the ones that we have fixed.
self.assertEqual(unpacked_list[index1]['third'], 5)
self.assertEqual(unpacked_list[index1]['fourth'], 'B')
self.assertEqual(unpacked_list[index1]['fifth'], 'Z')
index2 = self.sim_params.get_pack_indexes({
'third': 5,
'fourth': 'B',
'fifth': 'X'
})[0]
# Now we use the index to get an element in the unpacked_list and
# check if the values are the ones that we have fixed.
self.assertEqual(unpacked_list[index2]['third'], 5)
self.assertEqual(unpacked_list[index2]['fourth'], 'B')
self.assertEqual(unpacked_list[index2]['fifth'], 'X')
index3 = self.sim_params.get_pack_indexes({
'third': 2,
'fourth': 'A',
'fifth': 'Z'
})[0]
# Now we use the index to get an element in the unpacked_list and
# check if the values are the ones that we have fixed.
self.assertEqual(unpacked_list[index3]['third'], 2)
self.assertEqual(unpacked_list[index3]['fourth'], 'A')
self.assertEqual(unpacked_list[index3]['fifth'], 'Z')
def test_to_dict_and_from_dict(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
# xxxxxxxxxx Test converting to and from a dictionary xxxxxxxxxxxxx
# First test converting to a dictionary
out = self.sim_params._to_dict()
self.assertIsInstance(out, dict)
self.assertEqual(out['parameters'], self.sim_params.parameters)
self.assertEqual(out['unpacked_parameters_set'],
self.sim_params._unpacked_parameters_set)
self.assertEqual(out['unpack_index'], self.sim_params._unpack_index)
self.assertEqual(out['original_sim_params'],
self.sim_params._original_sim_params)
# Now test converting from the dictionary
sim_params = SimulationParameters._from_dict(out)
self.assertEqual(sim_params, self.sim_params)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat tests with unpacked parameters xxxxxxxxxxxxxxxx
# First test converting to a dictionary
sim_params_unpacked_list = self.sim_params.get_unpacked_params_list()
for sim_params_elem in sim_params_unpacked_list:
# sim_params_elem = sim_params_unpacked_list[0]
out_elem = sim_params_elem._to_dict()
self.assertIsInstance(out_elem, dict)
self.assertEqual(out_elem['parameters'],
sim_params_elem.parameters)
self.assertEqual(out_elem['unpacked_parameters_set'],
sim_params_elem._unpacked_parameters_set)
self.assertEqual(out_elem['unpack_index'],
sim_params_elem._unpack_index)
self.assertEqual(out_elem['original_sim_params'],
sim_params_elem._original_sim_params._to_dict())
# Now test converting from the dictionary
sim_params_elem_d = SimulationParameters._from_dict(out_elem)
self.assertEqual(sim_params_elem, sim_params_elem_d)
self.assertEqual(sim_params_elem_d._original_sim_params,
self.sim_params)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test with an empty SimulationParameters xxxxxxxxxxxxxx
empty_simparams = SimulationParameters()
empty_simparams_dict = empty_simparams._to_dict()
empty_simparams_d = SimulationParameters._from_dict(
empty_simparams_dict)
self.assertEqual(empty_simparams, empty_simparams_d)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_json_and_from_json(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
# xxxxxxxxxx Test converting to and from a json xxxxxxxxxxxxxxxxxxx
# First test converting to json
encoded_params = self.sim_params.to_json()
self.assertIsInstance(encoded_params, str)
# This will raise an exception if encoded_params is not valid json
_ = json.loads(encoded_params)
# Now test converting from json
decoded_params = SimulationParameters.from_json(encoded_params)
self.assertTrue(self.sim_params == decoded_params)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat tests with unpacked parameters xxxxxxxxxxxxxxxx
sim_params_unpacked_list = self.sim_params.get_unpacked_params_list()
for sim_params_elem in sim_params_unpacked_list:
# First test converting to json
encoded_sim_params_elem = sim_params_elem.to_json()
self.assertIsInstance(encoded_sim_params_elem, str)
# This will raise an exception if encoded_params is not valid json
_ = json.loads(encoded_params)
# Now test converting from json
decoded_sim_params_elem = SimulationParameters.from_json(
encoded_sim_params_elem)
self.assertEqual(decoded_sim_params_elem, sim_params_elem)
self.assertEqual(decoded_sim_params_elem._original_sim_params,
self.sim_params)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_save_to_and_load_from_pickle_file(self) -> None:
self.sim_params.add('third', np.array([1, 3, 2, 5]))
self.sim_params.add('fourth', ['A', 'B'])
self.sim_params.set_unpack_parameter('third')
self.sim_params.set_unpack_parameter('fourth')
filename = 'params.pickle'
# Let's make sure the file does not exist
delete_file_if_possible(filename)
# Save to the file
self.sim_params.save_to_pickled_file(filename)
# Load from the file
sim_params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(self.sim_params['first'], sim_params2['first'])
self.assertEqual(self.sim_params['second'], sim_params2['second'])
self.assertEqual(len(self.sim_params), len(sim_params2))
self.assertEqual(self.sim_params.get_num_unpacked_variations(),
sim_params2.get_num_unpacked_variations())
        # Delete the file where the parameters were saved
delete_file_if_possible(filename)
# xxxxx Test saving and loading one of the unpacked variations xxxx
filename2 = 'params_3.pickle'
fourth_unpacked_param = self.sim_params.get_unpacked_params_list()[3]
fourth_unpacked_param.save_to_pickled_file(filename2)
fourth_unpacked_param2 = SimulationParameters.load_from_pickled_file(
filename2)
self.assertEqual(fourth_unpacked_param, fourth_unpacked_param2)
        # Delete the file where the parameters were saved
delete_file_if_possible(filename2)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_save_and_load_sanity(self) -> None:
        # Test that, when saving and loading the simulation parameters, the
        # list of unpacked parameters stays in the same order.
#
# Previously the order of the list of unpacked parameters could
# change after saving and loading the SimulationParameters object
# from a file. This would cause problems for the SimulationResults
# class. This bug was fixed by making sure the order of the
        # unpacked parameters list is deterministic (alphabetically sorted)
# and here we test for the bug in case it ever comes back after
# some code change.
params = SimulationParameters.create({
'first':
np.array([0, 5, 10, 26]),
'second': ['aha', 'ahe'],
'third': [10],
'fourth': [30],
'fifth': ['hum', 'blabla'],
'sixth': ['a', 'b', 'c'],
'seventh': ['ok', 'not ok', 'maybe']
})
filename = 'paramsfile.pickle'
# Test without unpacking any parameter
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the first one and test
params.set_unpack_parameter('first')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the second one and test again
params.set_unpack_parameter('second')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the third one and test again
params.set_unpack_parameter('third')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the fourth one and test again
params.set_unpack_parameter('fourth')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the fifth one and test again
params.set_unpack_parameter('fifth')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the sixth one and test again
params.set_unpack_parameter('sixth')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# Unpack the seventh one and test again
params.set_unpack_parameter('seventh')
unpacked_parameters = copy(params.unpacked_parameters)
delete_file_if_possible(filename)
params.save_to_pickled_file(filename)
params2 = SimulationParameters.load_from_pickled_file(filename)
self.assertEqual(unpacked_parameters, params2.unpacked_parameters)
delete_file_if_possible(filename)
# noinspection PyUnresolvedReferences
def test_load_from_config_file(self) -> None:
try:
import configobj
import validate
del configobj
del validate
except ImportError: # pragma: no cover
self.skipTest(
"test_load_from_config_file - "
"This configobj and validate modules must be installed.")
filename = 'test_config_file.txt'
# xxxxxxxxxx Write the config file xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# First we delete the file if it exists
delete_file_if_possible(filename)
fid = open(filename, 'w')
fid.write("modo=test\n[Scenario]\nSNR=0,5,10\nM=4\nmodulator=PSK\n"
"[IA Algorithm]\nmax_iterations=60")
fid.close()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Read the parameters from the file xxxxxxxxxxxxxxxxxxxx
# Since we are not specifying a "validation spec" all parameters
# will be read as strings or list of strings.
params = SimulationParameters.load_from_config_file(filename)
self.assertEqual(len(params), 5)
self.assertEqual(params['modo'], 'test')
self.assertEqual(params['SNR'], ['0', '5', '10'])
self.assertEqual(params['M'], '4')
self.assertEqual(params['modulator'], 'PSK')
self.assertEqual(params['max_iterations'], '60')
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxx Read the parameters from file with a validation spec xxxxxx
spec = """modo=string
[Scenario]
SNR=real_numpy_array(default=15)
M=integer(min=4, max=512, default=4)
modulator=option('PSK', 'QAM', 'BPSK', default="PSK")
[IA Algorithm]
max_iterations=integer(min=1)
unpacked_parameters=string_list(default=list('SNR'))
""".split("\n")
params2 = SimulationParameters.load_from_config_file(filename, spec)
self.assertEqual(len(params2), 6)
self.assertEqual(params2['modo'], 'test')
np.testing.assert_array_almost_equal(params2['SNR'],
np.array([0., 5., 10.]))
self.assertEqual(params2['M'], 4)
self.assertEqual(params2['modulator'], 'PSK')
self.assertEqual(params2['max_iterations'], 60)
self.assertEqual(params2['unpacked_parameters'], ['SNR'])
self.assertEqual(params2.unpacked_parameters, ['SNR'])
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Lets create an invalid config file and try to load the parameters
# First we provide an invalid value for M
fid = open(filename, 'w')
fid.write("modo=test\n[Scenario]\nSNR=0,5,10\nM=-4\nmodulator=PSK\n"
"[IA Algorithm]\nmax_iterations=60")
fid.close()
with self.assertRaises(Exception):
SimulationParameters.load_from_config_file(filename, spec)
# Now we do not provide the required parameter max_iterations
fid = open(filename, 'w')
fid.write("modo=test\n[Scenario]\nSNR=0,5,10\nM=4\nmodulator=PSK\n"
"[IA Algorithm]")
fid.close()
with self.assertRaises(Exception):
SimulationParameters.load_from_config_file(filename, spec)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Remove the config file used in this test xxxxxxxxxxxxx
delete_file_if_possible(filename)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_dataframe(self) -> None:
# If the pandas package is not installed, we will skip testing this
# method
if not _PANDAS_AVAILABLE: # pragma: nocover
self.skipTest("test_to_dataframe - Pandas is not installed")
# Create some dummy parameters (including two parameters set to be
# unpacked)
params = SimulationParameters()
# Try converting an empty SimulationParameters
df = params.to_dataframe()
self.assertTrue(isinstance(df, DataFrame))
self.assertTrue(df.empty)
params.add("extra", 2.3)
params.add("SNR", np.array([0, 3, 6, 9]))
params.add("bias", [1.2, 1.6])
params.set_unpack_parameter('SNR')
params.set_unpack_parameter('bias')
params.add("Name", "Some string")
df = params.to_dataframe()
self.assertEqual(len(df.columns), 4)
self.assertEqual(len(df), params.get_num_unpacked_variations())
col_names = ["SNR", "bias", "Name", "extra"]
for name in col_names:
self.assertIn(name, df.columns)
for i, l in enumerate(params.get_unpacked_params_list()):
for col_names in ["SNR", "bias", "Name", "extra"]:
self.assertAlmostEqual(l[col_names], df.loc[i, col_names])
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx Results Module xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class ResultsModuleFunctionsTestCase(unittest.TestCase):
def setUp(self) -> None:
"""Called before each test."""
pass
def test_combine_simulation_results(self) -> None:
dummyrunner = _DummyRunner()
dummyrunner.simulate()
dummyrunner2 = _DummyRunner()
dummyrunner2.simulate()
dummyrunner3 = _DummyRunner()
dummyrunner3.params.add('extra', np.array([4.1, 5.7, 6.2]))
dummyrunner3.simulate()
results1 = dummyrunner.results
results2 = dummyrunner2.results
results3 = dummyrunner3.results
results2.add_new_result('bla', Result.SUMTYPE, 10)
# Lets modify the result1 just to make the tests later not trivial
for r in results1['lala']:
r._value += 0.8
# If both SimulationResults objects do not have exactly the same
# results (names, not values), an exception will be raised.
with self.assertRaises(RuntimeError):
combine_simulation_results(results1, results2)
# results1 and results3 can be combined
union = combine_simulation_results(results1, results3)
# xxxxxxxxxx Test the parameters of the combined object xxxxxxxxxxx
# Note that combine_simulation_parameters is already tested
comb_params = combine_simulation_parameters(results1.params,
results3.params)
# Test if the parameters of the combined result object are correct
self.assertEqual(comb_params, union.params)
# xxxxxxxxxx Test the results of the combined object xxxxxxxxxxxxxx
# Note that the 'elapsed_time' and 'num_skipped_reps' results are
# always added by the SimulationRunner class.
self.assertEqual(set(union.get_result_names()),
{'elapsed_time', 'lala', 'num_skipped_reps'})
# The unpacked param list of union is (the order might be different)
# [{'bias': 1.3, 'SNR': 0.0, 'extra': 2.2},
# {'bias': 1.3, 'SNR': 0.0, 'extra': 4.1},
# {'bias': 1.3, 'SNR': 0.0, 'extra': 5.7},
# {'bias': 1.3, 'SNR': 0.0, 'extra': 6.2},
# {'bias': 1.3, 'SNR': 5.0, 'extra': 2.2},
# {'bias': 1.3, 'SNR': 5.0, 'extra': 4.1},
# {'bias': 1.3, 'SNR': 5.0, 'extra': 5.7},
# {'bias': 1.3, 'SNR': 5.0, 'extra': 6.2},
# {'bias': 1.3, 'SNR': 10.0, 'extra': 2.2},
# {'bias': 1.3, 'SNR': 10.0, 'extra': 4.1},
# {'bias': 1.3, 'SNR': 10.0, 'extra': 5.7},
# {'bias': 1.3, 'SNR': 10.0, 'extra': 6.2},
# {'bias': 1.3, 'SNR': 15.0, 'extra': 2.2},
# {'bias': 1.3, 'SNR': 15.0, 'extra': 4.1},
# {'bias': 1.3, 'SNR': 15.0, 'extra': 5.7},
# {'bias': 1.3, 'SNR': 15.0, 'extra': 6.2},
# {'bias': 1.3, 'SNR': 20.0, 'extra': 2.2},
# {'bias': 1.3, 'SNR': 20.0, 'extra': 4.1},
# {'bias': 1.3, 'SNR': 20.0, 'extra': 5.7},
# {'bias': 1.3, 'SNR': 20.0, 'extra': 6.2}]
#
        # The number of updates of each result in union should be equal to
        # 2, except for the results corresponding to the 'extra' parameter
        # value of 4.1, which appears in both results1 and results3 and
        # therefore has 4 updates.
all_indexes = set(range(union.params.get_num_unpacked_variations()))
index_extra_4dot1 = set(union.params.get_pack_indexes({'extra': 4.1}))
other_indexes = all_indexes - index_extra_4dot1
for index in index_extra_4dot1:
self.assertEqual(union['lala'][index].num_updates, 4)
for index in other_indexes:
self.assertEqual(union['lala'][index].num_updates, 2)
# Calculate the expected lala results
expected_lala_results = list(
repeat(0, union.params.get_num_unpacked_variations()))
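        # A variation present in both result sets gets the average of the
        # two 'lala' results; otherwise the single available value is used.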
for index, variation in enumerate(
union.params.get_unpacked_params_list()):
count = 0.
snr = variation['SNR']
extra = variation['extra']
try:
r1index = results1.params.get_pack_indexes({
'SNR': snr,
'extra': extra
})[0]
count += 1.
expected_lala_results[index] \
+= results1['lala'][r1index].get_result()
except ValueError:
pass
try:
r3index = results3.params.get_pack_indexes({
'SNR': snr,
'extra': extra
})[0]
count += 1.
expected_lala_results[index] \
+= results3['lala'][r3index].get_result()
except ValueError:
pass
expected_lala_results[index] /= count
union_lala_results = [v.get_result() for v in union['lala']]
self.assertEqual(expected_lala_results, union_lala_results)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
class ResultTestCase(unittest.TestCase):
"""
Unit-tests for the Result class in the results module.
"""
def setUp(self) -> None:
"""Called before each test."""
self.result1 = Result("name", Result.SUMTYPE)
self.result2 = Result("name2", Result.RATIOTYPE)
self.result3 = Result("name3", Result.MISCTYPE)
self.result4 = Result("name4", Result.CHOICETYPE, choice_num=6)
def test_init(self) -> None:
# Test if an exception is raised if we try to create a result type
# with choice_num not being an integer
with self.assertRaises(RuntimeError):
# noinspection PyTypeChecker
Result("name4", Result.CHOICETYPE, choice_num=6.6)
def test_get_update_type(self) -> None:
"""
        Test the two properties, one to get the update type code and the
        other to get the update type name. Note that both properties
        reflect the value of the same variable, the self._update_type
        variable.
"""
self.assertEqual(self.result1.type_code, Result.SUMTYPE)
self.assertEqual(self.result1.type_name, "SUMTYPE")
self.assertEqual(self.result2.type_code, Result.RATIOTYPE)
self.assertEqual(self.result2.type_name, "RATIOTYPE")
self.assertEqual(self.result3.type_code, Result.MISCTYPE)
self.assertEqual(self.result3.type_name, "MISCTYPE")
self.assertEqual(self.result4.type_code, Result.CHOICETYPE)
self.assertEqual(self.result4.type_name, "CHOICETYPE")
# Test that the possible type codes are different
self.assertEqual(
4,
len({
Result.SUMTYPE, Result.RATIOTYPE, Result.MISCTYPE,
Result.CHOICETYPE
}))
def test_update(self) -> None:
# Test the update function of the SUMTYPE
self.result1.update(13)
self.result1.update(4)
self.assertEqual(self.result1._value, 17)
self.assertEqual(self.result1.get_result(), 17)
# Test the update function of the RATIOTYPE
self.result2.update(3, 4)
self.result2.update(9, 36)
self.assertEqual(self.result2._value, 12)
self.assertEqual(self.result2._total, 40)
self.assertEqual(self.result2.get_result(), 0.3)
self.result2.update(12, 8)
self.assertEqual(self.result2.get_result(), 0.5)
# Test the update function of the MISCTYPE. Note how we can store
# anything.
self.result3.update("First")
self.assertEqual(self.result3.get_result(), "First")
self.result3.update("Second")
self.assertEqual(self.result3.get_result(), "Second")
self.result3.update(0.4)
self.assertEqual(self.result3.get_result(), 0.4)
self.result3.update(0.4)
self.assertEqual(self.result3.get_result(), 0.4)
# Test the update function of the CHOICETYPE.
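        # Each update records the chosen index; get_result() returns the
        # fraction of updates that selected each of the choice_num indexes.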
self.result4.update(0)
self.result4.update(1)
self.result4.update(0)
self.result4.update(4)
np.testing.assert_array_almost_equal(self.result4.get_result(),
[.5, .25, 0, 0, .25, 0])
self.result4.update(5)
np.testing.assert_array_almost_equal(self.result4.get_result(),
[.4, .2, 0, 0, .2, .2])
# Test if an exception is raised when updating a Result of the
# RATIOTYPE without specifying both the value and the total.
with self.assertRaises(ValueError):
self.result2.update(3)
# Test if an exception is thrown when updating a result of some
# unknown type
# noinspection PyTypeChecker
result_invalid = Result('invalid', 'invalid_type')
with self.assertRaises(ValueError):
result_invalid.update(10)
# Test if an exception is raised for the CHOICETYPE if the updated
# value is not a correct index.
with self.assertRaises(IndexError):
self.result4.update(8)
with self.assertRaises(AssertionError):
self.result4.update(3.4)
def test_update_with_accumulate(self) -> None:
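        # With accumulate_values=True each individual update is also stored
        # in _value_list/_total_list, besides the aggregated value.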
result1 = Result('name', Result.SUMTYPE, accumulate_values=True)
self.assertEqual(result1.accumulate_values_bool,
result1._accumulate_values_bool)
result1.update(13)
result1.update(30)
self.assertEqual(result1._value, 43)
self.assertEqual(result1.get_result(), 43)
self.assertEqual(result1._total, 0)
self.assertEqual(result1._value_list, [13, 30])
self.assertEqual(result1._total_list, [])
result2 = Result('name', Result.RATIOTYPE, accumulate_values=True)
result2.update(3, 10)
result2.update(6, 7)
result2.update(1, 15)
self.assertEqual(result2._value, 10)
self.assertEqual(result2._total, 32)
self.assertEqual(result2.get_result(), 0.3125)
self.assertEqual(result2._value_list, [3, 6, 1])
self.assertEqual(result2._total_list, [10, 7, 15])
result3 = Result('name', Result.MISCTYPE, accumulate_values=True)
result3.update(3)
result3.update("some string")
result3.update(2)
self.assertEqual(result3._value, 2)
self.assertEqual(result3._total, 0)
self.assertEqual(result3.get_result(), 2)
self.assertEqual(result3._value_list, [3, "some string", 2])
self.assertEqual(result3._total_list, [])
result4 = Result('name',
Result.CHOICETYPE,
accumulate_values=True,
choice_num=5)
result4.update(3)
result4.update(1)
result4.update(0)
result4.update(3)
result4.update(4)
np.testing.assert_array_almost_equal(result4._value, [1, 1, 0, 2, 1])
np.testing.assert_array_almost_equal(result4.get_result(),
[.2, .2, 0, .4, .2])
self.assertEqual(result4._total, 5)
self.assertEqual(result4._value_list, [3, 1, 0, 3, 4])
self.assertEqual(result4._total_list, [])
def test_create(self) -> None:
r1 = Result.create(name='nome1',
update_type=Result.SUMTYPE,
value=10.4,
accumulate_values=False)
r1.update(3.5)
self.assertEqual(r1.num_updates, 2)
self.assertEqual(r1.get_result(), 13.9)
self.assertEqual(r1._value_list, [])
self.assertEqual(r1._total_list, [])
r2 = Result.create(name='nome2',
update_type=Result.RATIOTYPE,
value=10.4,
total=20.2,
accumulate_values=True)
r2.update(2, 3)
r2.update(4.2, 5.3)
self.assertEqual(r2.num_updates, 3)
self.assertAlmostEqual(r2.get_result(), 0.58245614)
self.assertEqual(r2._value_list, [10.4, 2, 4.2])
self.assertEqual(r2._total_list, [20.2, 3, 5.3])
r3 = Result.create(name='nome3',
update_type=Result.MISCTYPE,
value='valor',
accumulate_values=True)
self.assertEqual(r3.num_updates, 1)
self.assertEqual(r3.get_result(), 'valor')
with self.assertRaises(RuntimeError):
            # Trying to create a Result of CHOICETYPE without specifying
            # the total raises an exception
Result.create(name='nome4',
update_type=Result.CHOICETYPE,
value=3,
accumulate_values=True)
r4 = Result.create(name='nome4',
update_type=Result.CHOICETYPE,
value=3,
total=6,
accumulate_values=True)
r4.update(5)
self.assertEqual(r4.num_updates, 2)
np.testing.assert_equal(r4._value, [0, 0, 0, 1, 0, 1])
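# For the CHOICETYPE, create() treats `value` as the chosen index and `total` as
# the number of choices, so the initial 3 and the update(5) give one count each
# at indices 3 and 5.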
def test_merge(self) -> None:
# Test merge of Results of SUMTYPE
self.result1.update(13)
self.result1.update(30)
result_sum_before1 = self.result1._result_sum
result_sum_sqr_before1 = self.result1._result_squared_sum
result1_other = Result.create("name", Result.SUMTYPE, 11)
expected_result_sum1 = result_sum_before1 + result1_other._result_sum
expected_result_sqr_sum1 = (result_sum_sqr_before1 +
result1_other._result_squared_sum)
self.result1.merge(result1_other)
self.assertEqual(self.result1.name, "name")
self.assertEqual(self.result1.get_result(), 54)
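# Merged SUMTYPE value: 13 + 30 + 11 = 54.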
self.assertEqual(self.result1.num_updates, 3)
self.assertEqual(self.result1._result_sum, expected_result_sum1)
self.assertEqual(self.result1._result_squared_sum,
expected_result_sqr_sum1)
# Test merge of Results of RATIOTYPE
self.result2.update(3, 10)
self.result2.update(6, 7)
self.result2.update(1, 15)
result_sum_before2 = self.result2._result_sum
result_sum_sqr_before2 = self.result2._result_squared_sum
result2_other = Result.create("name2", Result.RATIOTYPE, 34, 50)
result2_other.update(12, 18)
expected_result_sum2 = result_sum_before2 + result2_other._result_sum
expected_result_sqr_sum2 = (result_sum_sqr_before2 +
result2_other._result_squared_sum)
self.result2.merge(result2_other)
self.assertEqual(self.result2.name, "name2")
self.assertEqual(self.result2._value, 56)
self.assertEqual(self.result2._total, 100)
self.assertEqual(self.result2.get_result(), 0.56)
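# Merged ratio: (3 + 6 + 1 + 34 + 12) / (10 + 7 + 15 + 50 + 18) = 56 / 100 = 0.56.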
self.assertEqual(self.result2.num_updates, 5)
self.assertEqual(self.result2._result_sum, expected_result_sum2)
self.assertEqual(self.result2._result_squared_sum,
expected_result_sqr_sum2)
# Test merge of Results of MISCTYPE
# There is no true merge for the misc type: merging simply replaces the
# stored value with the value from the other Result object.
self.result3.update(0.4)
result3_other = Result.create("name3", Result.MISCTYPE, 3)
# with self.assertRaises(AssertionError):
self.result3.merge(result3_other)
self.assertEqual(self.result3.get_result(), 3)
# Test merge of Results of CHOICETYPE
result4_other = Result.create("name4", Result.CHOICETYPE, 3, 6)
result4_other.update(2)
self.result4.update(1)
self.result4.update(0)
self.result4.update(3)
self.result4.merge(result4_other)
self.assertEqual(self.result4.name, 'name4')
np.testing.assert_array_almost_equal(self.result4._value,
[1, 1, 1, 2, 0, 0])
self.assertEqual(self.result4._total, 5)
np.testing.assert_array_almost_equal(self.result4.get_result(),
[.2, .2, .2, .4, 0, 0])
self.assertEqual(self.result4.num_updates, 5)
# Test merging results with different name or type
result5 = Result.create("name5", Result.SUMTYPE, 3)
with self.assertRaises(AssertionError):
self.result1.merge(result5)
result6 = Result.create("name", Result.RATIOTYPE, 3, 4)
with self.assertRaises(AssertionError):
self.result1.merge(result6)
def test_merge_with_accumulate(self) -> None:
result1 = Result('name', Result.SUMTYPE, accumulate_values=True)
result1.update(13)
result1.update(30)
result1_other = Result.create("name",
Result.SUMTYPE,
11,
accumulate_values=True)
result1_other.update(22)
result1_other.update(4)
result1.merge(result1_other)
self.assertEqual(result1.get_result(), 80)
self.assertEqual(result1._value_list, [13, 30, 11, 22, 4])
self.assertEqual(result1._total_list, [])
result2 = Result('name2', Result.RATIOTYPE, accumulate_values=True)
result2.update(3, 10)
result2.update(6, 7)
result2.update(1, 15)
result2_other = Result.create("name2",
Result.RATIOTYPE,
34,
50,
accumulate_values=True)
result2_other.update(12, 18)
result2.merge(result2_other)
self.assertEqual(result2._value, 56)
self.assertEqual(result2._value_list, [3, 6, 1, 34, 12])
self.assertEqual(result2._total, 100)
self.assertEqual(result2._total_list, [10, 7, 15, 50, 18])
self.assertEqual(result2.get_result(), 0.56)
self.assertEqual(result2.num_updates, 5)
# Test if an exception is raised if we try to merge with a Result
# object which does not have the accumulate_values property set to
# True
result3 = Result('name2', Result.RATIOTYPE, accumulate_values=False)
result3.update(2, 6)
result3.update(7, 15)
result3.update(2, 5)
with self.assertRaises(AssertionError):
result2.merge(result3)
# Note that the opposite is possible. That is, a Result object
# without accumulated values can be merged with a Result object
# with accumulated values. In that case, the accumulated values of
# the second object will be ignored.
result3.merge(result2)
self.assertEqual(result3._value, 67)
self.assertEqual(result3._total, 126)
self.assertEqual(result3._value_list, [])
self.assertEqual(result3._total_list, [])
# Test for the CHOICETYPE type
result4 = Result.create('name4',
Result.CHOICETYPE,
2,
4,
accumulate_values=True)
result4.update(0)
result4.update(3)
result4_other = Result.create('name4',
Result.CHOICETYPE,
0,
4,
accumulate_values=True)
result4_other.update(3)
result4.merge(result4_other)
self.assertEqual(result4._value_list, [2, 0, 3, 0, 3])
self.assertEqual(result4._total_list, [])
np.testing.assert_array_almost_equal(result4.get_result(),
np.array([2, 0, 1, 2]) / 5.)
def test_get_result_mean_and_var(self) -> None:
# Test for Result.SUMTYPE
result1 = Result('name', Result.SUMTYPE, accumulate_values=True)
result1.update(13)
result1.update(30)
result1_other = Result.create("name",
Result.SUMTYPE,
11,
accumulate_values=True)
result1_other.update(22)
result1_other.update(4)
result1.merge(result1_other)
self.assertEqual(result1.get_result(), 80)
expected_mean1 = np.array(result1._value_list, dtype=float).mean()
self.assertAlmostEqual(result1.get_result_mean(), expected_mean1)
expected_var1 = np.array(result1._value_list).var()
self.assertAlmostEqual(result1.get_result_var(), expected_var1)
# Test for Result.RATIOTYPE
result2 = Result('name2', Result.RATIOTYPE, accumulate_values=True)
result2.update(3, 10)
result2.update(6, 7)
result2.update(1, 15)
result2_other = Result.create("name2",
Result.RATIOTYPE,
34,
50,
accumulate_values=True)
result2_other.update(12, 18)
result2.merge(result2_other)
aux2 = (np.array(result2._value_list, dtype=float) /
np.array(result2._total_list, dtype=float))
expected_mean2 = aux2.mean()
self.assertAlmostEqual(result2.get_result_mean(), expected_mean2)
expected_var2 = aux2.var()
self.assertAlmostEqual(result2.get_result_var(), expected_var2)
def test_representation(self) -> None:
self.assertEqual(self.result1.__repr__(),
"Result -> name: Nothing yet")
self.assertEqual(self.result2.__repr__(),
"Result -> name2: 0/0 -> NaN")
self.assertEqual(self.result3.__repr__(),
"Result -> name3: Nothing yet")
self.assertEqual(self.result4.__repr__(),
"Result -> name4: Nothing yet")
self.result1.update(10)
self.result2.update(2, 4)
self.result3.update(0.4)
self.result4.update(3)
self.result4.update(2)
self.assertEqual(self.result1.__repr__(), "Result -> name: 10")
self.assertEqual(self.result2.__repr__(),
"Result -> name2: 2/4 -> 0.5")
self.assertEqual(self.result3.__repr__(), "Result -> name3: 0.4")
self.assertEqual(self.result4.__repr__(),
"Result -> name4: [0. 0. 0.5 0.5 0. 0. ]")
def test_equal_and_not_equal_operators(self) -> None:
self.result1.update(10)
self.result1.update(7)
result1 = Result.create("name", Result.SUMTYPE, 7)
result1.update(10)
self.assertTrue(self.result1 == result1)
self.assertFalse(self.result1 != result1)
result1_other = Result.create("name", Result.SUMTYPE, 7)
result1_other.update(9)
self.assertFalse(self.result1 == result1_other)
self.assertTrue(self.result1 != result1_other)
result1._update_type_code = 1
self.assertFalse(self.result1 == result1)
self.assertTrue(self.result1 != result1)
# Also test for the CHOICETYPE type, since it store the _value
# member variable as a numpy array
self.result4.update(3)
self.result4.update(1)
result4 = Result.create('name4', Result.CHOICETYPE, 1, 4)
result4.update(3)
# result4 only has 4 choices, while self.result4 has 6.
self.assertTrue(self.result4 != result4)
result4_other = Result.create('name4', Result.CHOICETYPE, 1, 6)
result4_other.update(3)
self.assertTrue(self.result4 == result4_other)
result4_other.update(2)
self.result4.update(1)
self.assertFalse(self.result4 == result4_other)
# Test comparison with something of a different class
self.assertFalse(self.result4 == 10.4)
def test_calc_confidence_interval(self) -> None:
# Test if an exceptions is raised for a Result object of the
# MISCTYPE update type.
with self.assertRaises(RuntimeError):
# A result object of the MISCTYPE type cannot use the
# get_confidence_interval method, since this update type ignores
# the accumulate_values option and never accumulates any value.
self.result3.get_confidence_interval()
# # Test if an exception is raised if the accumulate_values option
# # was not set to True
# with self.assertRaises(RuntimeError):
# self.result1.get_confidence_interval()
result = Result('name', Result.RATIOTYPE, accumulate_values=True)
# # Test if an exception is raised if there are not stored values yet
# with self.assertRaises(RuntimeError):
# result.get_confidence_interval()
# Now lets finally store some values.
result.update(10, 30)
result.update(3, 24)
result.update(15, 42)
result.update(5, 7)
# Calculate the expected confidence interval
A = (np.array(result._value_list, dtype=float) /
np.array(result._total_list, dtype=float))
expected_confidence_interval = calc_confidence_interval(A.mean(),
A.std(),
A.size,
P=95)
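# Note: calc_confidence_interval presumably implements the usual normal
# approximation, i.e. mean +/- z * std / sqrt(n) with z ~= 1.96 for P = 95
# (an assumption about the helper, not something asserted here).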
confidence_interval = result.get_confidence_interval(P=95)
np.testing.assert_array_almost_equal(expected_confidence_interval,
confidence_interval)
def test_to_dict_from_dict(self) -> None:
self.result1.update(13)
self.result1.update(30)
self.result2.update(3, 10)
self.result2.update(6, 7)
self.result2.update(1, 15)
self.result3.update(3)
self.result3.update("some string")
self.result3.update(2)
self.result4.update(3)
self.result4.update(1)
self.result4.update(0)
self.result4.update(3)
self.result4.update(4)
# xxxxxxxxxx Test converting to a dictionary xxxxxxxxxxxxxxxxxxxxxx
result1_dict = self.result1.to_dict()
result2_dict = self.result2.to_dict()
result3_dict = self.result3.to_dict()
result4_dict = self.result4.to_dict()
self.assertIsInstance(result1_dict, dict)
self.assertIsInstance(result2_dict, dict)
self.assertIsInstance(result3_dict, dict)
self.assertIsInstance(result4_dict, dict)
# We will not test individual dictionary keys here, since we will
# test later if we can recover the actual Result object from the
# dictionary and that should be enough.
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test converting from a dictionary xxxxxxxxxxxxxxxxxxxx
result1 = Result.from_dict(result1_dict)
result2 = Result.from_dict(result2_dict)
result3 = Result.from_dict(result3_dict)
result4 = Result.from_dict(result4_dict)
self.assertEqual(self.result1, result1)
self.assertEqual(self.result2, result2)
self.assertEqual(self.result3, result3)
self.assertEqual(self.result4, result4)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_json_and_from_json(self) -> None:
self.result1.update(13)
self.result1.update(30)
self.result2.update(3, 10)
self.result2.update(6, 7)
self.result2.update(1, 15)
self.result3.update(3)
self.result3.update("some string")
self.result3.update(2)
self.result4.update(3)
self.result4.update(1)
self.result4.update(0)
self.result4.update(3)
self.result4.update(4)
# TODO: finish implementation
# xxxxxxxxxx Test converting to and from a json xxxxxxxxxxxxxxxxxxx
# First test converting to json
result1_json = self.result1.to_json()
result2_json = self.result2.to_json()
result3_json = self.result3.to_json()
result4_json = self.result4.to_json()
# This will raise an exception if the encoded results are not valid JSON
_ = json.loads(result1_json)
_ = json.loads(result2_json)
_ = json.loads(result3_json)
_ = json.loads(result4_json)
# Now test converting from json
result1 = Result.from_json(result1_json)
result2 = Result.from_json(result2_json)
result3 = Result.from_json(result3_json)
result4 = Result.from_json(result4_json)
self.assertEqual(self.result1, result1)
self.assertEqual(self.result2, result2)
self.assertEqual(self.result3, result3)
self.assertEqual(self.result4, result4)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# noinspection PyMethodMayBeStatic
class SimulationResultsTestCase(unittest.TestCase):
"""
Unit-tests for the SimulationResults class in the results module.
"""
def setUp(self) -> None:
# First SimulationResults object
self.simresults = SimulationResults()
self.simresults.add_new_result("lala", Result.SUMTYPE, 13)
result2 = Result("lele", Result.RATIOTYPE)
result2.update(3, 10)
result2.update(8, 10)
self.simresults.add_result(result2)
result4 = Result.create('lulu', Result.CHOICETYPE, 3, 6)
result4.update(1)
self.simresults.add_result(result4)
# Second SimulationResults object
self.other_simresults = SimulationResults()
result1_other = Result.create('lala', Result.SUMTYPE, 30)
result2_other = Result.create('lele', Result.RATIOTYPE, 4, 10)
result3 = Result.create('lili', Result.MISCTYPE, "a string")
self.other_simresults.add_result(result1_other)
self.other_simresults.add_result(result2_other)
self.other_simresults.add_result(result3)
result4_other = Result.create('lulu', Result.CHOICETYPE, 0, 6)
result4_other.update(5)
result4_other.update(5)
self.other_simresults.add_result(result4_other)
def test_params_property(self) -> None:
params = SimulationParameters()
params.add('number', 10)
params.add('name', 'lala')
# Try to set the parameters to an invalid object
with self.assertRaises(ValueError):
# noinspection PyTypeChecker
self.simresults.set_parameters(10)
# Set the simulation parameters
self.simresults.set_parameters(params)
# test the get property
params2 = self.simresults.params
self.assertEqual(len(params), len(params2))
self.assertEqual(params['number'], params2['number'])
self.assertEqual(params['name'], params2['name'])
def test_get_result_names(self) -> None:
# The output of the get_result_names is a list of names. We
# transform it into a set in this test only to make the order of
# the names unimportant.
expected_output = {'lala', 'lele', 'lulu'}
self.assertEqual(set(self.simresults.get_result_names()),
expected_output)
# Test also the representation of the SimulationResults object
self.assertEqual(self.simresults.__repr__(),
"""SimulationResults: ['lala', 'lele', 'lulu']""")
def test_add_result(self) -> None:
# Add a result with the same name of an existing result -> Should
# replace it
result1_other = Result.create("lala", Result.SUMTYPE, 25)
self.simresults.add_result(result1_other)
self.assertEqual(len(self.simresults['lala']), 1)
self.assertEqual(self.simresults['lala'][0].get_result(), 25)
# Add a new result
result3 = Result.create('lili', Result.MISCTYPE, "a string")
self.simresults.add_result(result3)
self.assertEqual(set(self.simresults.get_result_names()),
{"lala", "lele", "lili", "lulu"})
self.assertEqual(self.simresults['lili'][0].get_result(), "a string")
def test_append_result(self) -> None:
result1_other = Result.create("lala", Result.SUMTYPE, 25)
self.simresults.append_result(result1_other)
# Since we append a new Result with the name 'lala', then now we
# should have two Results for 'lala' (in a simulation these two
# results would probably correspond to 'lala' results with
# different simulation parameters)
self.assertEqual(len(self.simresults['lala']), 2)
self.assertEqual(self.simresults['lala'][0].get_result(), 13)
self.assertEqual(self.simresults['lala'][1].get_result(), 25)
# Test if an exception is thrown if we try to append result with a
# different type
result1_wrong = Result.create("lala", Result.RATIOTYPE, 25, 30)
with self.assertRaises(ValueError):
self.simresults.append_result(result1_wrong)
def test_append_all_results(self) -> None:
self.simresults.append_all_results(self.other_simresults)
# Note that self.simresults only has the 'lala' and 'lele' results.
# After we append the results in self.other_simresults
# self.simresults should have also the 'lili' result, but with only
# a single result for 'lili' and two results for both 'lala' and
# 'lele'.
self.assertEqual(set(self.simresults.get_result_names()),
{"lulu", "lala", "lele", "lili"})
self.assertEqual(len(self.simresults['lala']), 2)
self.assertEqual(len(self.simresults['lele']), 2)
self.assertEqual(len(self.simresults['lili']), 1)
self.assertEqual(len(self.simresults['lulu']), 2)
def test_merge_all_results(self) -> None:
# Note that even though there is a 'lili' result in
# self.other_simresults, only the results already present in
# self.simresults ('lala', 'lele' and 'lulu') will be merged. Also,
# self.other_simresults must have all the results in
# self.simresults otherwise there will be a KeyError.
self.simresults.merge_all_results(self.other_simresults)
self.assertEqual(self.simresults['lala'][-1].get_result(), 43)
self.assertEqual(self.simresults['lele'][-1].get_result(),
(11. + 4.) / (20. + 10.))
# One update from the 'lala' result in self.simresults and another
# from the 'lala' result in self.other_simresults
self.assertEqual(self.simresults['lala'][0].num_updates, 2)
# Two updates from the 'lele' result in self.simresults and another
# from the 'lele' result in self.other_simresults
self.assertEqual(self.simresults['lele'][0].num_updates, 3)
# Test if an empty SimulationResults object can merge with another
# SimulationResults object.
emptyresults = SimulationResults()
emptyresults.merge_all_results(self.simresults)
self.assertEqual(set(emptyresults.get_result_names()),
{'lala', 'lele', 'lulu'})
# xxxxx Test the merge with the num_skipped_reps result xxxxxxxxxxx
simresults1 = SimulationResults()
simresults2 = SimulationResults()
simresults1.add_new_result('name1', Result.SUMTYPE, 3)
simresults1.add_new_result('num_skipped_reps', Result.SUMTYPE, 3)
simresults2.add_new_result('name1', Result.SUMTYPE, 2)
simresults1.merge_all_results(simresults2)
self.assertEqual(set(simresults1.get_result_names()),
{'name1', 'num_skipped_reps'})
self.assertEqual(set(simresults2.get_result_names()), {'name1'})
simresults3 = SimulationResults()
simresults3.add_new_result('name1', Result.SUMTYPE, 4)
simresults3.merge_all_results(simresults1)
self.assertEqual(set(simresults3.get_result_names()),
{'name1', 'num_skipped_reps'})
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_equal_and_not_equal_operators(self) -> None:
elapsed_time_result = Result.create('elapsed_time', Result.SUMTYPE, 30)
self.simresults.add_result(elapsed_time_result)
simresults = SimulationResults()
lala_result = Result('lala', Result.SUMTYPE)
lele_result = Result('lele', Result.RATIOTYPE)
lulu_result = Result('lulu', Result.CHOICETYPE, choice_num=6)
lala_result.update(13)
lele_result.update(8, 10)
lele_result.update(3, 10)
lulu_result.update(3)
lulu_result.update(1)
elapsed_time_result2 = Result.create('elapsed_time', Result.SUMTYPE,
20)
simresults.add_result(lala_result)
simresults.add_result(lele_result)
simresults.add_result(lulu_result)
simresults.add_result(elapsed_time_result2)
# Note that the elapsed_time result is different, but it is not
# taken into account in the equality comparison
self.assertTrue(self.simresults == simresults)
self.assertFalse(self.simresults != simresults)
# Let's change the parameters in the SimulationResults objects to
# see if it impacts equality
simresults.params.add('value', 10)
self.assertFalse(self.simresults == simresults)
self.assertTrue(self.simresults != simresults)
self.simresults.params.add('value', 10)
self.assertTrue(self.simresults == simresults)
self.assertFalse(self.simresults != simresults)
# Let's change one Result in one of them to see if it impacts
# equality
simresults['lala'][0].update(5)
self.assertFalse(self.simresults == simresults)
self.assertTrue(self.simresults != simresults)
self.simresults['lala'][0].update(5)
self.assertTrue(self.simresults == simresults)
self.assertFalse(self.simresults != simresults)
# Lets add a new result to one of them which is not in the other
lili_result = Result('lili', Result.SUMTYPE)
simresults.add_result(lili_result)
self.assertFalse(self.simresults == simresults)
self.assertTrue(self.simresults != simresults)
# Let's test with something of a different class
# noinspection PyTypeChecker
self.assertFalse(self.simresults == 20)
def test_get_result_values_list(self) -> None:
self.simresults.append_all_results(self.other_simresults)
self.assertEqual(self.simresults.get_result_values_list('lala'),
[13, 30])
self.assertEqual(self.simresults.get_result_values_list('lele'),
[0.55, 0.4])
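# 'lele': (3 + 8) / (10 + 10) = 0.55 from self.simresults and 4 / 10 = 0.4
# from self.other_simresults.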
lulu_list = self.simresults.get_result_values_list('lulu')
np.testing.assert_array_almost_equal(
lulu_list[0], np.array([0., 0.5, 0., 0.5, 0., 0.]))
np.testing.assert_array_almost_equal(
lulu_list[1], np.array([0.33333333, 0., 0., 0., 0., 0.66666667]))
# There is only one result for 'lili', which comes from
# self.other_simresults.
self.assertEqual(self.simresults.get_result_values_list('lili'),
['a string'])
def test_get_result_values_confidence_intervals(self) -> None:
simresults = SimulationResults()
simresults.params.add('P', [1, 2])
simresults.params.set_unpack_parameter('P')
result = Result('name', Result.RATIOTYPE, accumulate_values=True)
result_other = Result('name', Result.RATIOTYPE, accumulate_values=True)
result.update(3, 10)
result.update(7, 9)
result.update(2, 5)
result.update(3, 3)
result.update(7, 15)
result_other.update(13, 15)
result_other.update(15, 20)
result_other.update(4, 9)
simresults.add_result(result)
simresults.append_result(result_other)
P = 95 # Confidence level of 95%
list_of_confidence_intervals \
= simresults.get_result_values_confidence_intervals('name', P)
# Calculates the expected list of confidence intervals
expected_list_of_confidence_intervals \
= [i.get_confidence_interval(P) for i in simresults['name']]
# Test if they are equal
for a, b in zip(list_of_confidence_intervals,
expected_list_of_confidence_intervals):
np.testing.assert_array_almost_equal(a, b)
# xxxxxxxxxx Test for a subset of the parameters xxxxxxxxxxxxxxxxxx
c1 = simresults.get_result_values_confidence_intervals(
'name', P, fixed_params={'P': 1.0})
c2 = simresults.get_result_values_confidence_intervals(
'name', P, fixed_params={'P': 2.0})
np.testing.assert_array_almost_equal(
c1[0], expected_list_of_confidence_intervals[0])
np.testing.assert_array_almost_equal(
c2[0], expected_list_of_confidence_intervals[1])
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_dict_from_dict(self) -> None:
# xxxxxxxxxx Test converting to a dictionary xxxxxxxxxxxxxxxxxxxxxx
simresults_dict = self.simresults._to_dict()
self.assertIsInstance(simresults_dict, dict)
# We will not test all individual dictionary keys here, since we
# will test later if we can recover the actual SimulationResults
# object from the dictionary and that should be enough.
# For now we only check that the individual Result objects in
# SimulationResults were converted to their own dictionary
# representations
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test converting from a dictionary xxxxxxxxxxxxxxxxxxxx
simresults = SimulationResults._from_dict(simresults_dict)
self.assertEqual(self.simresults, simresults)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_json_and_from_json(self) -> None:
# xxxxxxxxxx Test converting to and from a json xxxxxxxxxxxxxxxxxxx
# First test converting to json
simresults_json = self.simresults.to_json()
# This will raise an exception if the encoded results are not valid JSON
_ = json.loads(simresults_json)
# Now test converting from json
simresults = SimulationResults.from_json(simresults_json)
self.assertEqual(self.simresults, simresults)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_save_to_and_load_from_file(self) -> None:
base_filename = 'results_({age})_({temperature})_({factor})'
base_pickle_filename = "{0}.pickle".format(base_filename)
base_json_filename = "{0}.json".format(base_filename)
# Set simulation parameters
self.simresults.params.add('factor', 0.5)
self.simresults.params.add('temperature', 50.5)
self.simresults.params.add('age', 3)
# xxxxx Add a result with accumulate set to True xxxxxxxxxxxxxxxxxx
result_acu = Result('name', Result.SUMTYPE, accumulate_values=True)
result_acu.update(13)
result_acu.update(30)
self.simresults.add_result(result_acu)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Save to the file and get the actual filename used to save the file
# Note that if we pass the base filename without extension a 'pickle'
# extension will be added
filename_pickle = self.simresults.save_to_file(base_filename)
filename_json = self.simresults.save_to_file(base_json_filename)
# Load from the file
simresults2 = SimulationResults.load_from_file(filename_pickle)
simresults3 = SimulationResults.load_from_file(filename_json)
# xxxxxxxxxx Test file saved and loaded from pickle file xxxxxxxxxx
self.assertEqual(simresults2.original_filename, base_pickle_filename)
self.assertEqual(len(self.simresults), len(simresults2))
self.assertEqual(set(self.simresults.get_result_names()),
set(simresults2.get_result_names()))
self.assertEqual(self.simresults['lala'][0].type_code,
simresults2['lala'][0].type_code)
self.assertEqual(self.simresults['lele'][0].type_code,
simresults2['lele'][0].type_code)
self.assertEqual(self.simresults['lulu'][0].type_code,
simresults2['lulu'][0].type_code)
self.assertAlmostEqual(
self.simresults['lala'][0].get_result(),
simresults2['lala'][0].get_result(),
)
self.assertAlmostEqual(
self.simresults['lele'][0].get_result(),
simresults2['lele'][0].get_result(),
)
np.testing.assert_array_almost_equal(
self.simresults['lulu'][0].get_result(),
simresults2['lulu'][0].get_result(),
)
self.assertAlmostEqual(
self.simresults['name'][0].get_result(),
simresults2['name'][0].get_result(),
)
# test if the parameters were also saved
self.assertEqual(self.simresults.params['age'],
simresults2.params['age'])
self.assertAlmostEqual(self.simresults.params['temperature'],
simresults2.params['temperature'])
self.assertAlmostEqual(self.simresults.params['factor'],
simresults2.params['factor'])
# Delete the file where the results were saved
delete_file_if_possible(filename_pickle)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test file saved and loaded from json file xxxxxxxxxxxx
# This will raise an exception if the file contents are not valid JSON
with open(filename_json, 'r') as fid:
_ = json.load(fid)
self.assertEqual(simresults3.original_filename, base_json_filename)
self.assertEqual(len(self.simresults), len(simresults3))
self.assertEqual(set(self.simresults.get_result_names()),
set(simresults3.get_result_names()))
self.assertEqual(self.simresults['lala'][0].type_code,
simresults3['lala'][0].type_code)
self.assertEqual(self.simresults['lele'][0].type_code,
simresults3['lele'][0].type_code)
self.assertEqual(self.simresults['lulu'][0].type_code,
simresults3['lulu'][0].type_code)
self.assertAlmostEqual(
self.simresults['lala'][0].get_result(),
simresults3['lala'][0].get_result(),
)
self.assertAlmostEqual(
self.simresults['lele'][0].get_result(),
simresults3['lele'][0].get_result(),
)
np.testing.assert_array_almost_equal(
self.simresults['lulu'][0].get_result(),
simresults3['lulu'][0].get_result(),
)
self.assertAlmostEqual(
self.simresults['name'][0].get_result(),
simresults3['name'][0].get_result(),
)
# test if the parameters were also saved
self.assertEqual(self.simresults.params['age'],
simresults3.params['age'])
self.assertAlmostEqual(self.simresults.params['temperature'],
simresults3.params['temperature'])
self.assertAlmostEqual(self.simresults.params['factor'],
simresults3.params['factor'])
# Delete the file where the results were saved
delete_file_if_possible(filename_json)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_to_dataframe(self) -> None:
# If the pandas package is not installed, we will skip testing this
# method
if not _PANDAS_AVAILABLE: # pragma: nocover
self.skipTest("test_to_dataframe - Pandas is not installed")
# Create some dummy parameters (including two parameters set to be
# unpacked)
params = SimulationParameters()
params.add("extra", 2.3)
params.add("SNR", np.array([0, 3, 6, 9]))
params.add("bias", [1.2, 1.6])
params.set_unpack_parameter('SNR')
params.set_unpack_parameter('bias')
params.add("Name", "Some string")
sim_results = SimulationResults()
# Try converting an empty SimulationResults
df = sim_results.to_dataframe()
self.assertTrue(isinstance(df, DataFrame))
self.assertTrue(df.empty)
for p in params.get_unpacked_params_list():
extra = p['extra']
SNR = p['SNR']
bias = p['bias']
sim_results.append_result(
Result.create('res1', Result.SUMTYPE, extra * SNR + bias))
sim_results.append_result(
Result.create('res2', Result.SUMTYPE, bias * SNR + extra))
sim_results.set_parameters(params)
# When a SimulationResults object is obtained from the simulation runner it
# will also have a "runned_reps" attribute that should be converted to the dataframe
runned_reps = [2] * params.get_num_unpacked_variations()
sim_results.runned_reps = runned_reps
# Now lets convert this SimulationResults object to a pandas
# DataFrame
df = sim_results.to_dataframe()
# The DataFrame should have the same number of rows as the number
# of parameters variations
self.assertEqual(len(df), params.get_num_unpacked_variations())
# Test the parameters
expected_bias = [a['bias'] for a in params.get_unpacked_params_list()]
expected_SNR = [a['SNR'] for a in params.get_unpacked_params_list()]
expected_name = [a['Name'] for a in params.get_unpacked_params_list()]
expected_extra \
= [a['extra'] for a in params.get_unpacked_params_list()]
np.testing.assert_array_equal(df.bias, expected_bias)
np.testing.assert_array_equal(df.SNR, expected_SNR)
np.testing.assert_array_equal(df.Name, expected_name)
np.testing.assert_array_equal(df.extra, expected_extra)
np.testing.assert_array_equal(df.runned_reps, runned_reps)
# Test the results
for index, p in enumerate(params.get_unpacked_params_list()):
extra = p['extra']
SNR = p['SNR']
bias = p['bias']
expected_res1 = extra * SNR + bias
expected_res2 = bias * SNR + extra
self.assertAlmostEqual(expected_res1, df.res1[index])
self.assertAlmostEqual(expected_res2, df.res2[index])
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx Runner Module xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# This function is used in test methods for the SimulationRunner class
def _delete_pickle_files():
"""
Delete all files with a '.pickle' extension in the current folder.
"""
files = glob.glob('*.pickle')
for f in files:
os.remove(f)
try:
# Remove files in the partial_results folder
files = glob.glob('./partial_results/*.pickle')
for f in files:
os.remove(f)
os.rmdir('./partial_results')
except OSError: # pragma: nocover
pass
def _delete_progressbar_output_files(): # pragma: no cover
"""
Delete all the files with *results*.txt names in the current folder.
"""
progressbar_files = glob.glob('*results*.txt')
for f in progressbar_files: # pragma: no cover
try:
os.remove(f)
except OSError: # pragma: nocover
pass
# Define a _DummyRunner class for the testing the simulate and
# simulate_in_parallel methods in the SimulationRunner class.
class _DummyRunner(SimulationRunner):
def __init__(self):
super().__init__(read_command_line_args=False)
# Set the progress update function style to None to avoid printing the
# progressbar in these tests.
self.rep_max = 2
self.update_progress_function_style = None
# Now we add a dummy parameter to our runner object
self.params.add('SNR', np.array([0., 5., 10., 15., 20.]))
self.params.add('bias', 1.3)
self.params.add('extra', np.array([2.2, 4.1]))
self.params.set_unpack_parameter('SNR')
self.params.set_unpack_parameter('extra')
self.delete_partial_results_bool = True
@staticmethod
def calc_result(SNR, bias, extra):
value = 1.2 * SNR + bias + extra
return value
def _run_simulation(self, current_params):
SNR = current_params['SNR']
bias = current_params['bias']
extra = current_params['extra']
sim_results = SimulationResults()
value = self.calc_result(SNR, bias, extra)
# The correct result will be SNR * 1.2 + 1.3 + extra
sim_results.add_new_result('lala', Result.RATIOTYPE, value, 1)
return sim_results
class _DummyRunnerRandom(SimulationRunner): # pragma: no cover
def __init__(self):
super().__init__(read_command_line_args=False)
# Set the progress update function style to None to avoid printing the
# progressbar in these tests.
self.rep_max = 2
self.update_progress_function_style = None
# Now we add a dummy parameter to our runner object
self.params.add('P', np.array([2., 2., 2., 2., 2.]))
self.params.set_unpack_parameter('P')
self.delete_partial_results_bool = True
self.rs = np.random.RandomState()
self.rs2 = np.random.RandomState()
def _on_simulate_current_params_start(self, current_params):
# Ideally we should re-seed any random number sources stored in a
# SimulationRunner object. However, for testing purposes we will
# only re-seed self.rs2 here.
self.rs2.seed()
def _run_simulation(self, current_params):
P = current_params['P']
sim_results = SimulationResults()
# This will have a different value for each set of simulation parameters
random_value = np.random.random_sample()
value = 1.2 * P + random_value
sim_results.add_new_result('result1', Result.RATIOTYPE, value, 1)
random_value2 = self.rs.rand()
value2 = 1.2 * P + random_value2
sim_results.add_new_result('result2', Result.RATIOTYPE, value2, 1)
# self.rs2.seed()
random_value3 = self.rs2.rand()
value3 = 1.2 * P + random_value3
sim_results.add_new_result('result3', Result.RATIOTYPE, value3, 1)
return sim_results
# Define a _DummyRunnerWithSkip class for the testing the simulate when a
# SkipThisOne exception is raised in the implemented _run_simulation
# method.
class _DummyRunnerWithSkip(SimulationRunner):
def __init__(self):
super().__init__(read_command_line_args=False)
# This is used only for testing purposes. You would not have this
# attribute in classes derived from SimulationRunner
self._num_skipped = -1
self.rep_max = 5
self.update_progress_function_style = None
# Now we add a dummy parameter to our runner object
self.params.add('SNR', np.array([0., 5., 10., 15., 20.]))
self.params.add('bias', 1.3)
self.params.add('extra', np.array([2.2, 4.1]))
self.params.set_unpack_parameter('SNR')
self.params.set_unpack_parameter('extra')
self.delete_partial_results_bool = True
@staticmethod
def calc_result(SNR, bias, extra):
value = 1.2 * SNR + bias + extra
return value
def _run_simulation(self, current_params):
# In practice, you would raise a SkipThisOne exception when some
# problem occurs, such as a bad channel realization, singular
# matrix in some algorithm, etc. Here we will raise this
# exception a couple of times during the first calls of the
# _run_simulation method for testing purposes.
if self._num_skipped < 2:
self._num_skipped += 1
if self._num_skipped > 0:
raise SkipThisOne('Skipping this one')
else:
pass # pragma: nocover
SNR = current_params['SNR']
bias = current_params['bias']
extra = current_params['extra']
sim_results = SimulationResults()
value = self.calc_result(SNR, bias, extra)
# The correct result will be SNR * 1.2 + 1.3 + extra
sim_results.add_new_result('lala', Result.RATIOTYPE, value, 1)
return sim_results
# noinspection PyUnboundLocalVariable
class SimulationRunnerTestCase(unittest.TestCase):
"""
Unit-tests for the SimulationRunner class in the runner module.
"""
def setUp(self) -> None:
self.runner = SimulationRunner(read_command_line_args=False)
# Test if the SimulationRunner sets a few default attributes in its init
# method.
def test_default_values(self) -> None:
# Note that we are also testing the elapsed_time and runned_reps
# properties, which should just return these attributes.
self.assertEqual(self.runner.rep_max, 1)
self.assertEqual(self.runner.elapsed_time, "0.00s")
self.assertEqual(self.runner.runned_reps, [])
self.assertTrue(isinstance(self.runner.params, SimulationParameters))
self.assertTrue(isinstance(self.runner.results, SimulationResults))
self.assertEqual(self.runner.progressbar_message, "Progress")
self.assertEqual(self.runner.update_progress_function_style, 'text2')
def test_not_implemented_methods(self) -> None:
# self.assertRaises(NotImplementedError,
# self.S1._get_vertex_positions)
with self.assertRaises(NotImplementedError):
# noinspection PyTypeChecker
self.runner._run_simulation(None)
def test_keep_going(self) -> None:
# the _keep_going method in the SimulationRunner class should
# return True
# noinspection PyTypeChecker
self.assertTrue(self.runner._keep_going(None, None, None))
def test_set_results_filename(self) -> None:
dummyrunner = _DummyRunner()
dummyrunner.set_results_filename()
self.assertIsNone(dummyrunner.results_filename)
dummyrunner.set_results_filename("some_name_{bias}_{extra}")
# Note that this line would be unnecessary if we had called the
# 'simulate' method of dummyrunner.
dummyrunner.results.set_parameters(dummyrunner.params)
self.assertEqual("some_name_1.3_[2.2,4.1].pickle",
dummyrunner.results_filename)
# xxxxxxxxxx Test setting file name with extension xxxxxxxxxxxxxxxx
dummyrunner2 = _DummyRunner()
dummyrunner2.set_results_filename()
self.assertIsNone(dummyrunner2.results_filename)
dummyrunner2.set_results_filename("some_name_{bias}_{extra}.pickle")
# Note that this line would be unnecessary if we had called the
# 'simulate' method of dummyrunner.
dummyrunner2.results.set_parameters(dummyrunner.params)
self.assertEqual("some_name_1.3_[2.2,4.1].pickle",
dummyrunner2.results_filename)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
def test_simulate(self) -> None:
self.runner.progressbar_message = "Simulating"
self.assertEqual(self.runner.progressbar_message, "Simulating")
self.assertEqual(self.runner.partial_results_folder, 'partial_results')
self.runner.partial_results_folder = "partial_test_results"
# from tests.simulations_package_test import _DummyRunner
dummyrunner = _DummyRunner()
# xxxxxxxxxx Set the name of the results file xxxxxxxxxxxxxxxxxxxxx
filename = 'dummyrunner_results_bias_{bias}'
dummyrunner.set_results_filename(filename)
# We will set the progress_output_type to 'file'
# This will make the progressbar print to a file, instead of stdout
dummyrunner.progress_output_type = 'file' # Default is 'screen'
self.assertEqual(dummyrunner.progress_output_type, 'file')
# Note that progress_output_type can only be 'screen' or 'file' and any
# other value raises an exception without changing the value
with self.assertRaises(RuntimeError):
dummyrunner.progress_output_type = 'invalid_value' # type: ignore
self.assertEqual(dummyrunner.progress_output_type, 'file')
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Perform a simulation of a single params variation xxxx
delete_file_if_possible(
"partial_results/dummyrunner_results_bias_1.3_unpack_03.pickle")
partial_file = Path(
"partial_results/dummyrunner_results_bias_1.3_unpack_03.pickle")
self.assertFalse(partial_file.exists())
dummyrunner.simulate(param_variation_index=3)
self.assertTrue(partial_file.exists())
delete_file_if_possible(
"partial_results/dummyrunner_results_bias_1.3_unpack_03.pickle")
# The results in the SimulationRunner are still empty, since only partial
# results were saved to a file
self.assertEqual(len(dummyrunner.results), 0)
dummyrunner.clear()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Perform the simulation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# The results will be the SNR values multiplied by 1.2 plus the
# bias and extra parameters
self.assertEqual(dummyrunner.runned_reps, [])
dummyrunner.simulate()
self.assertGreater(dummyrunner._simulation_tracking._elapsed_time, 0.0)
self.assertEqual(dummyrunner.runned_reps, [dummyrunner.rep_max] *
dummyrunner.params.get_num_unpacked_variations())
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Perform the tests xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
results_extra_1 = dummyrunner.results.get_result_values_list(
'lala', {'extra': 2.2})
expected_results_extra_1 = [3.5, 9.5, 15.5, 21.5, 27.5]
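# That is, 1.2 * SNR + 1.3 + 2.2 evaluated for SNR in [0, 5, 10, 15, 20].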
np.testing.assert_array_almost_equal(results_extra_1,
expected_results_extra_1)
results_extra_2 = dummyrunner.results.get_result_values_list(
'lala', {'extra': 4.1})
expected_results_extra_2 = [5.4, 11.4, 17.4, 23.4, 29.4]
np.testing.assert_array_almost_equal(results_extra_2,
expected_results_extra_2)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test if the results were saved correctly xxxxxxxxxxxxx
sim_results = SimulationResults.load_from_file(
dummyrunner.results_filename)
self.assertEqual(sim_results, dummyrunner.results)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat the test xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Now we do not set the results filename
dummyrunner2 = _DummyRunner()
dummyrunner2.simulate()
self.assertGreater(dummyrunner2._simulation_tracking._elapsed_time,
0.0)
self.assertEqual(dummyrunner.results, dummyrunner2.results)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat the test with wrong partial results xxxxxxxxxxx
# First we run a usual simulation and keep the partial results
dummyrunner3 = _DummyRunner()
dummyrunner3.set_results_filename('dummyrunner3_results')
self.assertEqual(dummyrunner3.delete_partial_results_bool, True)
dummyrunner3.delete_partial_results_bool = False
self.assertEqual(dummyrunner3.delete_partial_results_bool, False)
dummyrunner3.simulate()
self.assertGreater(dummyrunner3._simulation_tracking._elapsed_time,
0.0)
# Now we change the bias parameter
dummyrunner3.params.add('bias', 1.5)
# If we run a simulation with different parameters it will try to
# load the partial results with wrong parameters and an exception
# should be raised
with self.assertRaises(ValueError):
dummyrunner3.simulate()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat the test loading the partial results xxxxxxxxxx
dummyrunner4 = _DummyRunner()
dummyrunner4.set_results_filename('dummyrunner3_results')
dummyrunner4.delete_partial_results_bool = True
dummyrunner4.simulate()
self.assertGreater(dummyrunner4._simulation_tracking._elapsed_time,
0.0)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Delete the pickle files in the same folder
_delete_pickle_files()
def test_simulate_with_param_variation_index(self) -> None:
# Test the "simulate" method when the param_variation_index
# argument is specified.
# from tests.simulations_package_test import _DummyRunner
dummyrunner = _DummyRunner()
# Try to simulate for a given param_variation_index before setting
# the filename. This should raise an exception.
with self.assertRaises(RuntimeError):
dummyrunner.simulate(3)
# xxxxxxxxxx Set the name of the results file xxxxxxxxxxxxxxxxxxxxx
filename = 'dummyrunner_results_bias_{bias}'
dummyrunner.set_results_filename(filename)
# This will make the progressbar print to a file, instead of stdout
dummyrunner.progress_output_type = 'file' # Default is 'screen'
# Now we perform the simulation
dummyrunner.simulate(param_variation_index=4)
# Note that `dummyrunner.runned_reps` is not a list, but an integer
self.assertEqual(dummyrunner.runned_reps, 2)
pr = SimulationResults.load_from_file(
'partial_results/dummyrunner_results_bias_1.3_unpack_04.pickle')
# Get the parameters from the loaded result
bias = pr.params['bias']
snr = pr.params['SNR']
extra = pr.params['extra']
# Calculate the expected value
expected_value = _DummyRunner.calc_result(snr, bias, extra)
self.assertEqual(len(pr['lala']), 1)
self.assertAlmostEqual(pr['lala'][0].get_result(), expected_value)
_delete_pickle_files()
# This test method is normally skipped, unless you have started an
# IPython cluster with a "tests" profile so that you have at least one
# engine running.
def test_simulate_in_parallel(self) -> None: # pragma: no cover
if not _IPYTHON_AVAILABLE:
self.skipTest(
"test_simulate_in_parallel - IPython is not installed")
try:
from ipyparallel import Client
cl = Client(profile=b"tests", timeout=1.0)
dview = cl.direct_view()
# Reset the engines so that we don't have variables there from
# last computations
dview.execute('%reset')
dview.execute('import sys')
# We use block=True to ensure that all engines have modified
# their path to include the folder with the simulator before we
# create the load balanced view in the following.
dview.execute('sys.path.append("{0}")'.format(os.getcwd()),
block=True)
lview = cl.load_balanced_view()
if len(lview) == 0:
self.skipTest("test_simulate_in_parallel - "
"At least one IPython engine must be running.")
except IOError:
self.skipTest(
"test_simulate_in_parallel - "
"The IPython engines were not found. ('tests' profile)")
# noinspection PyUnresolvedReferences
from tests.simulations_package_test import _DummyRunner
dview.execute('import simulations_package_test', block=True)
sim_runner = _DummyRunner()
sim_runner.progressbar_message = 'bla'
# runner.update_progress_function_style = 'text1'
# xxxxxxxxxx Set the name of the results file xxxxxxxxxxxxxxxxxxxxx
filename = 'runner_results_bias_{bias}.pickle'
sim_runner.set_results_filename(filename)
# This will make the progressbar print to a file, instead of stdout
sim_runner.progress_output_type = 'file' # Default is 'screen'
# Remove old file from previous test run
_delete_pickle_files()
# Delete all the *results*.txt files (created by the progressbar)
_delete_progressbar_output_files()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
self.assertEqual(sim_runner.runned_reps, [])
sim_runner.simulate_in_parallel(lview)
self.assertGreater(sim_runner._simulation_tracking._elapsed_time, 0.0)
self.assertEqual(sim_runner.runned_reps, [sim_runner.rep_max] *
sim_runner.params.get_num_unpacked_variations())
results_extra_1 = sim_runner.results.get_result_values_list(
'lala', {'extra': 2.2})
expected_results_extra_1 = [3.5, 9.5, 15.5, 21.5, 27.5]
np.testing.assert_array_almost_equal(results_extra_1,
expected_results_extra_1)
results_extra_2 = sim_runner.results.get_result_values_list(
'lala', {'extra': 4.1})
expected_results_extra_2 = [5.4, 11.4, 17.4, 23.4, 29.4]
np.testing.assert_array_almost_equal(results_extra_2,
expected_results_extra_2)
# xxxxxxxxxx Test if the results were saved correctly xxxxxxxxxxxxx
sim_results = SimulationResults.load_from_file(
sim_runner.results_filename)
self.assertEqual(sim_results, sim_runner.results)
_delete_pickle_files()
_delete_progressbar_output_files()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat the test xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Now we do not set the results filename
runner2 = _DummyRunner()
runner2.simulate_in_parallel(lview)
self.assertEqual(sim_results, runner2.results)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Repeat the test with wrong partial results xxxxxxxxxxx
runner3 = _DummyRunner()
runner3.set_results_filename('runner3_results')
runner3.delete_partial_results_bool = False
runner3.simulate_in_parallel(lview)
# Now we change the bias parameter
runner3.params.add('bias', 1.5)
# If we run a simulation with different parameters it will try to
# load the partial results with wrong parameters and an exception
# should be raised. The raised exception is a ValueError, but
# since it is raised in the IPython engines, IPython itself will
# raise a CompositeError exception.
with self.assertRaises(CompositeError):
runner3.simulate_in_parallel(lview)
# Delete all *.pickle files in the same folder
_delete_pickle_files()
# Delete all the *results*.txt files (created by the progressbar)
_delete_progressbar_output_files()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# This test method is normally skipped, unless you have started an
# IPython cluster with a "tests" profile so that you have at least one
# engine running.
def test_simulate_in_parallel_with_random_values( # pragma: no cover
self) -> None:
if not _IPYTHON_AVAILABLE:
self.skipTest("test_simulate_in_parallel_with_random_values - "
"IPython is not installed")
try:
from ipyparallel import Client
cl = Client(profile=b"tests", timeout=1.0)
dview = cl.direct_view()
# Reset the engines so that we don't have variables there from
# last computations
dview.execute('%reset')
dview.execute('import sys')
# We use block=True to ensure that all engines have modified
# their path to include the folder with the simulator before we
# create the load balanced view in the following.
dview.execute('sys.path.append("{0}")'.format(os.getcwd()),
block=True)
lview = cl.load_balanced_view()
if len(lview) == 0: # pragma: no cover
self.skipTest("At least one IPython engine must be running.")
except IOError: # pragma: no cover
self.skipTest("test_simulate_in_parallel_with_random_values - "
"The IPython engines were not found. ('tests' "
"profile)")
#
#
# This test is intended to clarify some special care that must be
# taken regarding random sources when using the
# simulate_in_parallel method.
#
# The _DummyRunnerRandom class generates three results, each
# computed as the sum of one element of the 'P' parameter and a
# random value. The 'P' parameter is an array with 5 elements, all
# of them equal to 2.0. That means that if we didn't have a random
# part all elements in the returned results would be equal.
# noinspection PyUnresolvedReferences
from tests.simulations_package_test import _DummyRunnerRandom
dummyrunnerrandom = _DummyRunnerRandom()
dummyrunnerrandom.simulate_in_parallel(lview)
# For the result1 the random part is generated by calling the
# numpy.random.rand function inside _run_simulation. Because we are
# using the module level function 'rand' we are using the global
# RandomState object in numpy. This global RandomState object will
# be naturally different in each ipython engine and thus each
# element in result1 will be different.
result1 = dummyrunnerrandom.results.get_result_values_list('result1')
self.assertNotAlmostEqual(result1[0], result1[1])
self.assertNotAlmostEqual(result1[1], result1[2])
self.assertNotAlmostEqual(result1[2], result1[3])
self.assertNotAlmostEqual(result1[3], result1[4])
self.assertEqual(len(set(result1)), 5) # 5 different elements
# print; print result1
# For result2 the random part is generated by calling the rand
# method of a RandomState object created in the __init__ method of
# the _DummyRunnerRandom object. The problem is that each ipython
# engine will receive a copy of the _DummyRunnerRandom object and
# thus of the RandomState object. That means that for each value of
# the parameter 'P' the same random value will be generated and
# thus 'result2' will have 5 equal elements.
result2 = dummyrunnerrandom.results.get_result_values_list('result2')
self.assertAlmostEqual(result2[0], result2[1])
self.assertAlmostEqual(result2[1], result2[2])
self.assertAlmostEqual(result2[2], result2[3])
self.assertAlmostEqual(result2[3], result2[4])
self.assertEqual(len(set(result2)), 1) # 5 equal elements
# print; print result2
# For result3 the random part is generated by calling the rand
# method of a RandomState object created in the __init__ method of
# the _DummyRunnerRandom object. However, in the
# _on_simulate_current_params_start method we re-seed this
# RandomState object. Since _on_simulate_current_params_start is
# called once for each different value of the 'P' parameter, then
# the random value will be different for each value in 'P' and thus
# result3 will have 5 different values.
result3 = dummyrunnerrandom.results.get_result_values_list('result3')
self.assertNotAlmostEqual(result3[0], result3[1])
self.assertNotAlmostEqual(result3[1], result3[2])
self.assertNotAlmostEqual(result3[2], result3[3])
self.assertNotAlmostEqual(result3[3], result3[4])
self.assertEqual(len(set(result3)), 5) # 5 different elements
# Test the simulate method when the SkipThisOne exception is raised in
# the _run_simulation method.
def test_simulate_with_skipthisone(self) -> None:
# from tests.simulations_package_test import _DummyRunnerWithSkip
dummyrunner = _DummyRunnerWithSkip()
# xxxxxxxxxx Set the name of the results file xxxxxxxxxxxxxxxxxxxxx
filename = 'dummyrunnerwithskip_results_bias_{bias}'
dummyrunner.set_results_filename(filename)
# This will make the progressbar print to a file, instead of stdout
dummyrunner.progress_output_type = 'file' # Default is 'screen'
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Perform the simulation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# The results will be the SNR values multiplied by 1.2 plus the
# bias and extra parameters
dummyrunner.simulate()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Perform the tests xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
results_extra_1 = dummyrunner.results.get_result_values_list(
'lala', {'extra': 2.2})
expected_results_extra_1 = [3.5, 9.5, 15.5, 21.5, 27.5]
np.testing.assert_array_almost_equal(results_extra_1,
expected_results_extra_1)
results_extra_2 = dummyrunner.results.get_result_values_list(
'lala', {'extra': 4.1})
expected_results_extra_2 = [5.4, 11.4, 17.4, 23.4, 29.4]
np.testing.assert_array_almost_equal(results_extra_2,
expected_results_extra_2)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Test if the results were saved correctly xxxxxxxxxxxxx
sim_results = SimulationResults.load_from_file(
dummyrunner.results_filename)
self.assertEqual(sim_results, dummyrunner.results)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxxxxxxx Repeat the test xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # Now we do not set the results filename
# dummyrunner2 = _DummyRunner()
# dummyrunner2.simulate()
# self.assertEqual(dummyrunner.results, dummyrunner2.results)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxxxxxxx Repeat the test with wrong partial results xxxxxxxxxxx
# # First we run a usual simulation and keep the partial results
# dummyrunner3 = _DummyRunner()
# dummyrunner3.set_results_filename('dummyrunner3_results')
# dummyrunner3.delete_partial_results_bool = False
# dummyrunner3.simulate()
# # Now we change the bias parameter
# dummyrunner3.params.add('bias', 1.5)
# # If we run a simulation with different parameters it will try to
# # load the partial results with wrong parameters and an exception
# # should be raised
# with self.assertRaises(ValueError):
# dummyrunner3.simulate()
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxxxxxxx Repeat the test loading the partial results xxxxxxxxxx
# dummyrunner4 = _DummyRunner()
# dummyrunner4.set_results_filename('dummyrunner3_results')
# dummyrunner4.delete_partial_results_bool = True
# dummyrunner4.simulate()
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Delete the pickle files in the same folder
_delete_pickle_files()
# xxxxxxxxxx Doctests xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
if __name__ == "__main__":
unittest.main()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
| gpl-2.0 |
Jimmy-Morzaria/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
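# The solid contour (decision_function == 0) is the separating hyperplane and
# the dashed contours (decision_function == +/-1) indicate the margins.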
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
DentonW/Ps-H-Scattering | General Code/Python Scripts/Extrapolation.py | 1 | 15927 | #!/usr/bin/python
#TODO: Add checks for whether files are good
#TODO: Make relative difference function
import sys, scipy, pylab
import numpy as np
from math import *
import matplotlib.pyplot as plt
from xml.dom.minidom import parse, parseString
from xml.dom import minidom
def NumTermsOmega(omega): # Return the number of terms for a given omega
"""Uses combination with repetition to determine the number of terms for a given omega"""
f = factorial
k = 6
omega = omega + 1
n = f(omega+k-1) / (f(k) * f(omega-1))
return int(n)
def FindTerms(FileName, NumTerms):
"""Reorders the first NumTerms of the output of Todd program to find omega breakpoints"""
f = open(FileName, 'r')
# Get the value of omega
Omega = int(f.readline().split()[1])
print "Omega =", Omega
# Skip these lines
for i in range(3):
f.readline()
Terms = []
for line in f:
s = line.split()
if len(s) == 0:
break
if s[0].isdigit():
Terms.append(int(s[0]))
f.close()
print NumTerms, len(Terms)
if NumTerms > len(Terms):
print("Requesting more terms than are available in file...exiting.")
exit()
print "Number of terms in file", FileName, ": ", len(Terms)
print "Number of terms to use:", str(NumTerms)
print
TermsSub = Terms[0:NumTerms]
TermsSub.sort()
    # Create a list of numbers of terms for the full set for omega = 0 through Omega
FoundTerms = []
OmegaTerms = []
for i in range(Omega+1):
OmegaTerms.append(NumTermsOmega(i))
for i in range(Omega+1):
for j in range(len(TermsSub)):
if TermsSub[j] == OmegaTerms[i]:
print "Found", OmegaTerms[i], "at position", j+1
FoundTerms = FoundTerms + [j+1]
break
if TermsSub[j] > OmegaTerms[i]:
#print "Found next term past", OmegaTerms[i], "at position", j+1
#FoundTerms = FoundTerms + [j+1]
print "Found term before", OmegaTerms[i], "at position", j
FoundTerms = FoundTerms + [j]
break
if TermsSub[len(TermsSub)-1] != OmegaTerms[Omega]:
print "Last term at", len(TermsSub), "is less than", OmegaTerms[Omega]
FoundTerms = FoundTerms + [len(TermsSub)]
# Just here to put some extra space after running
print
return FoundTerms
def Extrapolate(Phases, Omega, OmegaPower, LowerOmega):
"""Fits the data to a straight line use SciPy's polyfit"""
xdata = range(LowerOmega, Omega+1)
xdata[:] = [x**OmegaPower for x in xdata]
ydata = []
for i in range(LowerOmega, Omega+1):
ydata = ydata + [tan(Phases[i])]
fit = scipy.polyfit(xdata, ydata, 1, None, True)
polycoeffs = fit[0]
residuals = fit[1][0]
ExtrapData = [polycoeffs, residuals, xdata, ydata]
return ExtrapData
def ExtrapolatePlot(Phases, Omega, OmegaPower, LowerOmega):
"""Plots the fitted line for the extrapolation"""
ExtrapData = Extrapolate(Phases, Omega, OmegaPower, LowerOmega)
yfit = scipy.polyval(ExtrapData[0], ExtrapData[2])
print yfit
yfit = np.append(yfit, ExtrapData[0][1])
print yfit
p1 = plt.plot(ExtrapData[2], ExtrapData[3], 'k.')
ExtrapData[2].append(0.0)
p2 = plt.plot(ExtrapData[2], yfit, 'r-')
print ExtrapData[2]
print ExtrapData[3]
plt.show()
return
def ToBool(s):
if (s.lower() == 'true'):
return True
return False
def ReadXMLData(xmldoc, tag):
""" Helper function for ReadPhaseShifts """
itemlist = xmldoc.getElementsByTagName(tag)
data = []
for s in itemlist:
data.append(str(s.childNodes[0].nodeValue))
if len(data) > 1:
print "More than one set found for ", tag
if data == []:
return None
return data[0]
def ReadPhaseShifts(Filename, FoundTerms, NumTests):
""" Reads the complete list of phase shifts from a given phase file and returns a 2D array. """
xmldoc = minidom.parse(Filename) # Read the XML file
shortfile = ReadXMLData(xmldoc, 'shortfile')
longfile = ReadXMLData(xmldoc, 'longfile')
energyfile = ReadXMLData(xmldoc, 'energyfile')
lvalue = int(ReadXMLData(xmldoc, 'lvalue'))
numterms = int(ReadXMLData(xmldoc, 'numterms'))
numsets = int(ReadXMLData(xmldoc, 'numsets'))
shielding = ReadXMLData(xmldoc, 'shielding')
#if shielding == None: # Not found in the input file
# shielding = 2*lvalue + 1 #@TODO: Is this a valid assumption?
#else:
# shielding = int(shielding)
if shielding != None:
shielding = int(shielding)
explambda = ReadXMLData(xmldoc, 'lambda')
# Read in nonlinear parameters
#@TODO: Handle multiple sets
alpha = float(ReadXMLData(xmldoc, 'alpha'))
beta = float(ReadXMLData(xmldoc, 'beta'))
gamma = float(ReadXMLData(xmldoc, 'gamma'))
kappa = float(ReadXMLData(xmldoc, 'kappa'))
mu = float(ReadXMLData(xmldoc, 'mu'))
ordering = ReadXMLData(xmldoc, 'ordering')
# Boolean values
paired = ReadXMLData(xmldoc, 'paired')
reorder = ReadXMLData(xmldoc, 'reorder')
paired = ToBool(paired)
reorder = ToBool(reorder)
# Read in the phase shift data
data = str(ReadXMLData(xmldoc, 'data'))
data = data.split('\n')
data = data[1:len(data)-1] # First and last entries are blanks from the newlines
if len(data) != numterms+1: # Include the +1 for the 0th entry
return None
phases = []
for n,d in enumerate(data):
if n not in FoundTerms:
continue
line = d.split()
if n != int(line[0]):
print "Phase shift file indices do not match!"
return None
if len(line) != NumTests+1:
print "Missing phase shift data on line " + str(n)
return None
line = [float(i) for i in line[1:]]
phases.append(line)
return phases
# def GetPhaseShifts(f, FoundTerms, TotalTerms, NumTests):
# """Reads phase shifts at specified terms"""
# Omega = len(FoundTerms)-1
#
# for i in range(3):
# f.readline()
#
# PhaseShifts = range(NumTests)
# for i in range(NumTests):
# PhaseShifts[i] = []
# j = 0 # Corresponds to Omega = 0
#
# for i in range(1,FoundTerms[Omega]+1): # Assuming that the last term is the highest for Omega.
# #@TODO: Check for end of file somehow?
# line = f.readline()
# if line[0] == '0':
# line = f.readline()
# s = line.split()
# if (len(s) == 0):
# print " "
# print "Error reading phase shifts: line length of 0"
# exit()
# if (len(s) < NumTests):
# print " "
# print "Error reading phase shifts: line length of " + str(len(s)) + " < " + str(NumTests)
# exit()
#
# if i == FoundTerms[j]:
# j = j + 1
# if j > Omega+1:
# print "Internal error reading phase shifts" # This shouldn't happen.
# return []
# for k in range(NumTests):
# #PhaseShifts[k+1] = PhaseShifts[k+1] + [float(s[k+1])]
# PhaseShifts[k].append(float(s[k+1]))
#
# # Skip rest of terms if we are not using them all
# print "Skipping " + str(TotalTerms-FoundTerms[Omega]+1) + " terms"
# for i in range(1,TotalTerms-FoundTerms[Omega]+1):
# f.readline()
#
# return PhaseShifts
#
# Main function follows
#
# These are hardcoded right now, but we could probably write something to read them in later.
# 109 of these! #@TODO: Could also just read from file and match up, but that will probably be difficult.
Headings = [ "Kohn", "Inverse Kohn", "Complex Kohn (S)", "Complex Kohn (T)", "Gen Kohn tau = 0.0", "Gen Kohn tau = 0.1", "Gen Kohn tau = 0.2", "Gen Kohn tau = 0.3",
"Gen Kohn tau = 0.4", "Gen Kohn tau = 0.5", "Gen Kohn tau = 0.6", "Gen Kohn tau = 0.7", "Gen Kohn tau = pi/4", "Gen Kohn tau = 0.8", "Gen Kohn tau = 0.9",
"Gen Kohn tau = 1.0", "Gen Kohn tau = 1.1", "Gen Kohn tau = 1.2", "Gen Kohn tau = 1.3", "Gen Kohn tau = 1.4", "Gen Kohn tau = 1.5", "Gen Kohn tau = pi/2",
"Gen Kohn tau = 1.6", "Gen Kohn tau = 1.7", "Gen Kohn tau = 1.8", "Gen Kohn tau = 1.9", "Gen Kohn tau = 2.0", "Gen Kohn tau = 2.1", "Gen Kohn tau = 2.2",
"Gen Kohn tau = 2.3", "Gen Kohn tau = 3*pi/4", "Gen Kohn tau = 2.4", "Gen Kohn tau = 2.5", "Gen Kohn tau = 2.6", "Gen Kohn tau = 2.7", "Gen Kohn tau = 2.8",
"Gen Kohn tau = 2.9", "Gen Kohn tau = 3.0", "Gen Kohn tau = pi", "Gen T Kohn tau = 0.0", "Gen T Kohn tau = 0.1", "Gen T Kohn tau = 0.2", "Gen T Kohn tau = 0.3",
"Gen T Kohn tau = 0.4", "Gen T Kohn tau = 0.5", "Gen T Kohn tau = 0.6", "Gen T Kohn tau = 0.7", "Gen T Kohn tau = pi/4", "Gen T Kohn tau = 0.8",
"Gen T Kohn tau = 0.9", "Gen T Kohn tau = 1.0", "Gen T Kohn tau = 1.1", "Gen T Kohn tau = 1.2", "Gen T Kohn tau = 1.3", "Gen T Kohn tau = 1.4",
"Gen T Kohn tau = 1.5", "Gen T Kohn tau = pi/2", "Gen T Kohn tau = 1.6", "Gen T Kohn tau = 1.7", "Gen T Kohn tau = 1.8", "Gen T Kohn tau = 1.9",
"Gen T Kohn tau = 2.0", "Gen T Kohn tau = 2.1", "Gen T Kohn tau = 2.2", "Gen T Kohn tau = 2.3", "Gen T Kohn tau = 3*pi/4", "Gen T Kohn tau = 2.4",
"Gen T Kohn tau = 2.5", "Gen T Kohn tau = 2.6", "Gen T Kohn tau = 2.7", "Gen T Kohn tau = 2.8", "Gen T Kohn tau = 2.9", "Gen T Kohn tau = 3.0",
"Gen T Kohn tau = pi", "Gen S Kohn tau = 0.0", "Gen S Kohn tau = 0.1", "Gen S Kohn tau = 0.2", "Gen S Kohn tau = 0.3", "Gen S Kohn tau = 0.4",
"Gen S Kohn tau = 0.5", "Gen S Kohn tau = 0.6", "Gen S Kohn tau = 0.7", "Gen S Kohn tau = pi/4", "Gen S Kohn tau = 0.8", "Gen S Kohn tau = 0.9",
"Gen S Kohn tau = 1.0", "Gen S Kohn tau = 1.1", "Gen S Kohn tau = 1.2", "Gen S Kohn tau = 1.3", "Gen S Kohn tau = 1.4", "Gen S Kohn tau = 1.5",
"Gen S Kohn tau = pi/2", "Gen S Kohn tau = 1.6", "Gen S Kohn tau = 1.7", "Gen S Kohn tau = 1.8", "Gen S Kohn tau = 1.9", "Gen S Kohn tau = 2.0",
"Gen S Kohn tau = 2.1", "Gen S Kohn tau = 2.2", "Gen S Kohn tau = 2.3", "Gen S Kohn tau = 3*pi/4", "Gen S Kohn tau = 2.4", "Gen S Kohn tau = 2.5",
"Gen S Kohn tau = 2.6", "Gen S Kohn tau = 2.7", "Gen S Kohn tau = 2.8", "Gen S Kohn tau = 2.9", "Gen S Kohn tau = 3.0", "Gen S Kohn tau = pi" ]
NumTests = 109 #@TODO: Could just calculate the length of Headings
# Headings = [ "Kohn", "Inverse Kohn", "Complex Kohn (S)", "Complex Kohn (T)", "Gen Kohn tau = 0.0", "Gen Kohn tau = 0.1", "Gen Kohn tau = 0.2", "Gen Kohn tau = 0.3",
# "Gen Kohn tau = 0.4", "Gen Kohn tau = 0.5", "Gen Kohn tau = 0.6", "Gen Kohn tau = 0.7", "Gen Kohn tau = pi/4", "Gen Kohn tau = 0.8", "Gen Kohn tau = 0.9",
# "Gen Kohn tau = 1.0", "Gen Kohn tau = 1.1", "Gen Kohn tau = 1.2", "Gen Kohn tau = 1.3", "Gen Kohn tau = 1.4", "Gen Kohn tau = 1.5", "Gen Kohn tau = pi/2",
# "Gen Kohn tau = 1.6", "Gen Kohn tau = 1.7", "Gen Kohn tau = 1.8", "Gen Kohn tau = 1.9", "Gen Kohn tau = 2.0", "Gen Kohn tau = 2.1", "Gen Kohn tau = 2.2",
# "Gen Kohn tau = 2.3", "Gen Kohn tau = 3*pi/4", "Gen Kohn tau = 2.4", "Gen Kohn tau = 2.5", "Gen Kohn tau = 2.6", "Gen Kohn tau = 2.7", "Gen Kohn tau = 2.8",
# "Gen Kohn tau = 2.9", "Gen Kohn tau = 3.0", "Gen Kohn tau = pi" ]
# NumTests = 39
if len(sys.argv) < 7: # script name plus the six required arguments
print """Usage: Extrapolation.py <energyfile> <phasefile> <outputfile> <# of terms in file> <# of terms to use> <lower omega> <optional: upper omega>
Example: Extrapolation.py energy.txt phase.txt output.txt 1216 1216 3"""
exit()
if sys.argv[4].isdigit() == False:
print "Error: The fourth argument must be a number."
exit()
if sys.argv[5].isdigit() == False:
print "Error: The fifth argument must be a number."
exit()
if sys.argv[6].isdigit() == False:
print "Error: The sixth argument must be a number."
exit()
FoundTerms = FindTerms(sys.argv[1], int(sys.argv[5]))
Omega = len(FoundTerms)-1
UpperOmega = Omega
LowerOmega = int(sys.argv[6])
if len(sys.argv) > 7:
if sys.argv[7].isdigit() == False:
print "Error: The seventh argument must be a number."
exit()
UpperOmega = int(sys.argv[7])
if UpperOmega < LowerOmega or UpperOmega < 0:
print "Error: Upper omega must be in the range " + str(LowerOmega) + "-" + str(Omega)
exit()
if LowerOmega > UpperOmega:
print "Error: Lower omega must be in the range 0-" + str(UpperOmega)
exit()
print
g = open(sys.argv[3], 'w')
g.write("Results from " + sys.argv[1] + " and " + sys.argv[2] + "\n")
g.write(" with " + str(sys.argv[5]) + " terms and starting at omega = " + str(sys.argv[6]) + "\n\n")
g.write("Extrapolated values\n")
g.write("-------------------\n")
PhaseShiftLists = range(NumTests)
ExtrapolationLists = range(NumTests)
DList = range(NumTests)
for i in range(NumTests):
PhaseShiftLists[i] = []
ExtrapolationLists[i] = []
DList[i] = []
PhaseShifts = np.array(ReadPhaseShifts(sys.argv[2], FoundTerms, NumTests))
#print PhaseShifts
#print len(PhaseShifts[0])
#exit()
# Iterate over the sets of tests
for j in range(NumTests):
RMin = 1.0e5 # Just some very high value
MinVal = 0
Phases = PhaseShifts[:,j]
    # This loop iterates from d = -7.0 to -0.11 in increments of 0.01, testing the extrapolation fit by
# comparing the residuals. The d that gives the smallest residuals is used, and the extrapolation
# is saved.
for i in range(0,690):
Residuals = Extrapolate(Phases, UpperOmega, -7.0+i/100.0, LowerOmega)[1]
if Residuals < RMin:
RMin = Residuals
MinVal = i
print
print "Results for " + Headings[j] + ":"
print "Smallest residuals at", -7.0+MinVal/100.0, "of", RMin
DList[j] = -7.0+MinVal/100.0
PhaseShiftLists[j] = Phases
Extrapolation = Extrapolate(Phases, UpperOmega, -7.0+MinVal/100.0, LowerOmega)
ExtrapolationLists[j] = Extrapolation
print "Extrapolated value =", atan(Extrapolation[0][1])
print "Relative difference % =", abs((atan(Extrapolation[0][1]) - Phases[np.size(Phases)-1]) / (atan(Extrapolation[0][1]) + Phases[np.size(Phases)-1]) * 2) * 100
print "Coefficients: ", Extrapolation[0]
Line = Headings[j] + ": " + str(atan(Extrapolation[0][1])) + "\n"
g.write(Line)
print "w3 - w4: " + str(abs(Phases[3] - Phases[4]))
if UpperOmega >= 5:
print "w4 - w5: " + str(abs(Phases[4] - Phases[5]))
if UpperOmega >= 6:
print "w5 - w6: " + str(abs(Phases[5] - Phases[6]))
if UpperOmega >= 7:
print "w6 - w7: " + str(abs(Phases[6] - Phases[7]))
g.write("\n")
g.write("\n")
g.write("\n")
g.write("More detailed analysis\n")
g.write("----------------------\n")
g.write("\n")
g.write("Reordered terms:\n")
for i in range(len(FoundTerms)):
g.write("Found " + str(NumTermsOmega(i)) + " at position " + str(FoundTerms[i]) + "\n")
g.write("\n")
for i in range(NumTests):
g.write("\nResults for " + Headings[i] + ":\n")
g.write("Phase shifts: ")
for j in range(len(PhaseShiftLists[i])):
g.write(str(PhaseShiftLists[i][j]) + " ")
g.write("\n")
g.write("Phase shift differences in omega: ")
for j in range(len(PhaseShiftLists[i]) - 1):
g.write(str(abs(PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1])) + " ")
g.write("\n")
g.write("Phase shift difference ratios: ")
for j in range(len(PhaseShiftLists[i]) - 2):
#print PhaseShiftLists[i][j], PhaseShiftLists[i][j+1], PhaseShiftLists[i][j+2]
g.write(str(abs( (PhaseShiftLists[i][j+1] - PhaseShiftLists[i][j+2]) / (PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1]) )) + " ")
g.write("\n")
for j in range(LowerOmega+1,UpperOmega):
if abs(PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1]) > abs(PhaseShiftLists[i][j-1] - PhaseShiftLists[i][j]):
g.write("No convergence pattern exists.\n")
g.write("Smallest residuals at d = " + str(DList[i]) + " of " + str(ExtrapolationLists[i][1]) + "\n")
g.write("Coefficients of " + str(ExtrapolationLists[i][0]) + "\n")
reldiff = abs((atan(ExtrapolationLists[i][0][1]) - PhaseShiftLists[i][len(PhaseShiftLists[i])-1]) / (atan(ExtrapolationLists[i][0][1]) + PhaseShiftLists[i][len(PhaseShiftLists[i])-1]) * 2) * 100
g.write("Relative difference % = " + str(reldiff) + "\n")
g.write("Extrapolated value = " + str(atan(ExtrapolationLists[i][0][1])) + "\n")
# This can be re-enabled to look at the fit lines with the phase shifts.
#if i == 3: # S-matrix
# ExtrapolatePlot(PhaseShiftLists[i], Omega, DList[i], LowerOmega)
g.close()
exit()
| mit |
bzcheeseman/phys211 | Alex/Compton Scattering/analysis.py | 1 | 2151 | from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt
#constants
mc2 = 511. #keV
E = 662. #keV, energy of initial gamma photons
angles1 = np.array([0.17453293,0.26179939,0.38397244,0.52359878,0.61086524,1.04719755,1.22173048,1.30899694])
angles2 = np.array([1.30899694,1.3962634,1.57079633,1.74532925,2.0943951,2.35619449,0.78539816])
en1 = np.array([645.94535489,638.70777021,609.75743148,572.12199113,558.37058023,404.93378497,361.50827687,342.6905567])
en2 = np.array([339.09451519,322.89846155,291.97872279,274.31030064,227.9306925,206.58134907,482.65044515])
en1err = np.array([6.3184713,6.26564529,6.05434124,5.77964599,5.67927657,4.55936515,4.24240908,4.10506146])
en2err = np.array([4.10100687,3.98279443,3.75711613,3.6281571, 3.28963966,3.13381417,5.14879897])
angles = np.append(angles1, angles2)
en = np.append(en1, en2)
enerr = np.append(en1err, en2err)
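# fitfunc implements the Compton relation E' = E / (1 + (E/m_e c^2)*(1 - cos(x))),
# with fit parameters p = [E, m_e c^2] (the same formula annotated on the plot below).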
def fitfunc(p, x):
y=(p[0]/p[1])*(1-np.cos(x))
return p[0]/(1+y)
def residual(p, x, y, dy):
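    # error-weighted residuals minimized by optimize.leastsq; chisq below is sum(residual**2)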
return (fitfunc(p, x)-y)/dy
p = [E, mc2] #expected
pf, cov, info, mesg, success = optimize.leastsq(residual, p, args=(angles, en, enerr), full_output=1)
chisq = sum(info["fvec"]*info["fvec"])
dof = len(en)-len(pf)
pferr = [np.sqrt(cov[i,i]) for i in range(len(pf))]
print pf
print cov
fig1 = plt.figure()
ax1 = plt.axes()
ax1.errorbar(angles, en, xerr=0., yerr=enerr, fmt='k.', label = 'Data')
T = np.linspace(angles.min(), angles.max(), 5000)
ax1.plot(T, fitfunc(pf, T), 'r-', label = 'Fit')
ax1.plot(T, fitfunc(p, T), 'b-', label = 'Theory')
ax1.set_title('Compton Scattering Energy Fit')
ax1.set_xlabel('Angle (radians), x')
ax1.set_ylabel('Energy (keV), E\'')
ax1.legend()
textfit = '$E\' = E / ((1 + (E/m_e c^2)(1 - cos(x)))$ \n' \
'$E = %.2f \pm %.2f$ keV \n' \
'$m_e c^2 = %.2f \pm %.2f$ keV \n' \
'$\chi^2= %.2f$ \n' \
'$N = %i$ (dof) \n' \
'$\chi^2/N = % .2f$' \
% (pf[0], pferr[0], pf[1], pferr[1], chisq, dof,
chisq/dof)
ax1.text(0.05, .33, textfit, transform=ax1.transAxes, fontsize=12,
verticalalignment='top')
plt.show() | lgpl-3.0 |
alvarofierroclavero/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
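# DBSCAN assigns the label -1 to noise samples; core_samples_mask flags the core points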
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/lib/mpl_toolkits/axes_grid1/colorbar.py | 4 | 27744 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
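# Example usage sketch (assumes an image mappable ``im`` drawn on an existing axes ``ax``):
#
#     cax, kw = make_axes(ax)
#     cb = Colorbar(cax, im)
#     cb.set_label_text('intensity')
#
# or, equivalently, ``cb = colorbar(im, ax=ax)`` using the helper function defined below.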
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.collections as collections
import matplotlib.contour as contour
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Bbox
make_axes_kw_doc = '''
============= ====================================================
Property Description
============= ====================================================
*orientation* vertical or horizontal
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
============= ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g., '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
It is known that some vector graphics viewers (svg and pdf) render white gaps
between segments of the colorbar. This is due to bugs in the viewers, not
matplotlib. As a workaround the colorbar can be rendered with overlapping
segments::
cbar = colorbar()
cbar.solids.set_edgecolor("face")
draw()
However this has negative consequences in other circumstances, particularly with
semi-transparent images (alpha < 1) and colorbar extensions, and it is not enabled
by default (see issue #1188).
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
The transData of the *cax* is adjusted so that the limits in the
longest axis actually correspond to the limits of the colorbar range. On
the other hand, the shortest axis has data limits of [1, 2], whose
unconventional value is to prevent underflow when a log scale is used.
''' % (make_axes_kw_doc, colormap_kw_doc)
docstring.interpd.update(colorbar_doc=colorbar_doc)
class CbarAxesLocator(object):
"""
    CbarAxesLocator is an axes_locator for colorbar axes. It adjusts the
    position of the axes to make room for the extended ends, i.e., the
extended ends are located outside the axes area.
"""
def __init__(self, locator=None, extend="neither", orientation="vertical"):
"""
        *locator* : the bbox returned from the locator is used as the
        initial axes location. If None, axes.bbox is used.
*extend* : same as in ColorbarBase
*orientation* : same as in ColorbarBase
"""
self._locator = locator
self.extesion_fraction = 0.05
self.extend = extend
self.orientation = orientation
def get_original_position(self, axes, renderer):
"""
get the original position of the axes.
"""
if self._locator is None:
bbox = axes.get_position(original=True)
else:
bbox = self._locator(axes, renderer)
return bbox
def get_end_vertices(self):
"""
        return a tuple of two vertex lists for the colorbar extended ends.
        The first list is for the minimum end, and the second is for
        the maximum end.
"""
        # Note that concatenating the two vertex lists produces the
        # vertices for the frame.
extesion_fraction = self.extesion_fraction
corx = extesion_fraction*2.
cory = 1./(1. - corx)
x1, y1, w, h = 0, 0, 1, 1
x2, y2 = x1 + w, y1 + h
dw, dh = w*extesion_fraction, h*extesion_fraction*cory
if self.extend in ["min", "both"]:
bottom = [(x1, y1),
(x1+w/2., y1-dh),
(x2, y1)]
else:
bottom = [(x1, y1),
(x2, y1)]
if self.extend in ["max", "both"]:
top = [(x2, y2),
(x1+w/2., y2+dh),
(x1, y2)]
else:
top = [(x2, y2),
(x1, y2)]
if self.orientation == "horizontal":
bottom = [(y,x) for (x,y) in bottom]
top = [(y,x) for (x,y) in top]
return bottom, top
def get_path_patch(self):
"""
get the path for axes patch
"""
end1, end2 = self.get_end_vertices()
verts = [] + end1 + end2 + end1[:1]
return Path(verts)
def get_path_ends(self):
"""
get the paths for extended ends
"""
end1, end2 = self.get_end_vertices()
return Path(end1), Path(end2)
def __call__(self, axes, renderer):
"""
Return the adjusted position of the axes
"""
bbox0 = self.get_original_position(axes, renderer)
bbox = bbox0
x1, y1, w, h = bbox.bounds
extesion_fraction = self.extesion_fraction
dw, dh = w*extesion_fraction, h*extesion_fraction
if self.extend in ["min", "both"]:
if self.orientation == "horizontal":
x1 = x1 + dw
else:
y1 = y1+dh
if self.extend in ["max", "both"]:
if self.orientation == "horizontal":
w = w-2*dw
else:
h = h-2*dh
return Bbox.from_bounds(x1, y1, w, h)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
# artists
self.solids = None
self.lines = None
self.dividers = None
self.extension_patch1 = None
self.extension_patch2 = None
if orientation == "vertical":
self.cbar_axis = self.ax.yaxis
else:
self.cbar_axis = self.ax.xaxis
if format is None:
if isinstance(self.norm, colors.LogNorm):
# change both axis for proper aspect
self.ax.xaxis.set_scale("log")
self.ax.yaxis.set_scale("log")
self.ax._update_transScale()
self.cbar_axis.set_minor_locator(ticker.NullLocator())
formatter = ticker.LogFormatter()
else:
formatter = None
elif cbook.is_string_like(format):
formatter = ticker.FormatStrFormatter(format)
else:
formatter = format # Assume it is a Formatter
if formatter is None:
formatter = self.cbar_axis.get_major_formatter()
else:
self.cbar_axis.set_major_formatter(formatter)
if cbook.iterable(ticks):
self.cbar_axis.set_ticks(ticks)
elif ticks is not None:
self.cbar_axis.set_major_locator(ticks)
else:
self._select_locator(formatter)
self._config_axes()
self.update_artists()
self.set_label_text('')
def _get_colorbar_limits(self):
"""
initial limits for colorbar range. The returned min, max values
will be used to create colorbar solid(?) and etc.
"""
if self.boundaries is not None:
C = self.boundaries
if self.extend in ["min", "both"]:
C = C[1:]
if self.extend in ["max", "both"]:
C = C[:-1]
return min(C), max(C)
else:
return self.get_clim()
def _config_axes(self):
'''
Adjust the properties of the axes to be adequate for colorbar display.
'''
ax = self.ax
axes_locator = CbarAxesLocator(ax.get_axes_locator(),
extend=self.extend,
orientation=self.orientation)
ax.set_axes_locator(axes_locator)
# override the get_data_ratio for the aspect works.
def _f():
return 1.
ax.get_data_ratio = _f
ax.get_data_ratio_log = _f
ax.set_frame_on(True)
ax.set_navigate(False)
self.ax.set_autoscalex_on(False)
self.ax.set_autoscaley_on(False)
if self.orientation == 'horizontal':
ax.xaxis.set_label_position('bottom')
ax.set_yticks([])
else:
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
def update_artists(self):
"""
Update the colorbar associated artists, *filled* and
*ends*. Note that *lines* are not updated. This needs to be
called whenever clim of associated image changes.
"""
self._process_values()
self._add_ends()
X, Y = self._mesh()
if self.filled:
C = self._values[:,np.newaxis]
self._add_solids(X, Y, C)
ax = self.ax
vmin, vmax = self._get_colorbar_limits()
if self.orientation == 'horizontal':
ax.set_ylim(1, 2)
ax.set_xlim(vmin, vmax)
else:
ax.set_xlim(1, 2)
ax.set_ylim(vmin, vmax)
def _add_ends(self):
"""
Create patches from extended ends and add them to the axes.
"""
del self.extension_patch1
del self.extension_patch2
path1, path2 = self.ax.get_axes_locator().get_path_ends()
fc=mpl.rcParams['axes.facecolor']
ec=mpl.rcParams['axes.edgecolor']
linewidths=0.5*mpl.rcParams['axes.linewidth']
self.extension_patch1 = PathPatch(path1,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.extension_patch2 = PathPatch(path2,
fc=fc, ec=ec, lw=linewidths,
zorder=2.,
transform=self.ax.transAxes,
clip_on=False)
self.ax.add_artist(self.extension_patch1)
self.ax.add_artist(self.extension_patch2)
def _set_label_text(self):
"""
set label.
"""
self.cbar_axis.set_label_text(self._label, **self._labelkw)
def set_label_text(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label_text()
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolormesh`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.extend in ["min", "both"]:
cc = self.to_rgba([C[0][0]])
self.extension_patch1.set_fc(cc[0])
X, Y, C = X[1:], Y[1:], C[1:]
if self.extend in ["max", "both"]:
cc = self.to_rgba([C[-1][0]])
self.extension_patch2.set_fc(cc[0])
X, Y, C = X[:-1], Y[:-1], C[:-1]
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha,
}
del self.solids
del self.dividers
col = self.ax.pcolormesh(*args, **kw)
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],),
)
self.ax.add_collection(self.dividers)
else:
self.dividers = None
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar. It deletes preexisting lines.
'''
del self.lines
N = len(levels)
x = np.array([1.0, 2.0])
X, Y = np.meshgrid(x,levels)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths,
)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _select_locator(self, formatter):
'''
select a suitable locator
'''
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator(nbins=5)
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b) #, nbins=10)
self.cbar_axis.set_major_locator(locator)
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
                b[1:-1] = 0.5*(self._values[:-1] + self._values[1:]) # midpoints between successive values
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v = np.arange(self.cmap.N, dtype=np.int16)
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = np.array(self.norm.boundaries)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v = 0.5*(bi[:-1] + bi[1:])
self._boundaries = b
self._values = v
return
else:
b = self._uniform_y(self.cmap.N+1)
self._process_values(b)
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries.
'''
vmin, vmax = self._get_colorbar_limits()
if isinstance(self.norm, colors.LogNorm):
y = np.logspace(np.log10(vmin), np.log10(vmax), N)
else:
y = np.linspace(vmin, vmax, N)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([1.0, 2.0])
if self.spacing == 'uniform':
y = self._uniform_y(len(self._boundaries))
else:
y = self._boundaries
self._y = y
X, Y = np.meshgrid(x,y)
return X, Y
def set_alpha(self, alpha):
"""
set alpha value.
"""
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
"""
Update the colorbar artists to reflect the change of the
associated mappable.
"""
self.update_artists()
if isinstance(mappable, contour.ContourSet):
if not mappable.filled:
self.add_lines(mappable)
@docstring.Substitution(make_axes_kw_doc)
def make_axes(parent, **kw):
'''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
'''
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
def colorbar(mappable, cax=None, ax=None, **kw):
"""
Create a colorbar for a ScalarMappable instance.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if cax is None:
cax, kw = make_axes(ax, **kw)
cax.hold(True)
cb = Colorbar(cax, mappable, **kw)
def on_changed(m):
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
ax.figure.sca(ax)
return cb
| mit |
mindbergh/PrivacyPolicyAnalyser | www/ppa/classifier/classify.py | 1 | 3514 | """
Copyright 2015 Austin Ankney, Ming Fang, Wenjun Wang and Yao Zhou
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file defines the concrete control flow logic
This script is the main entry point for the classifier.
It does action classification and extracts keywords from the incoming query.
===========================================================================
TODO([email protected]):
- Reconsider the type words
- Consider action 6,7,8 in one query
- log force change of aid
- update _type_recognition function
Usage: refer to demo.py
Dependency: numpy, scipy, sklearn
Author: Wenjun Wang<[email protected]>
Date: July 1, 2015
"""
import pickle
import os
from liblinearutil import *
from www.settings import BASE_DIR
from ppa.classifier.feature import *
class Classifier(object):
modeldir = os.path.abspath(BASE_DIR + "/ppa/classifier/models/")
stopword_path = os.path.abspath(BASE_DIR + "/ppa/classifier/english.stp")
def __init__(self):
"""
All variables which would be used by every query classification and parsing are listed here.
Only need to create Classifier object once, i.e. initialize once
"""
self._model = self._get_model()
self.stopwords = stopword(self.stopword_path)
self.feature_list = self._get_feature_list()
self.feature_arg = parse_options('-uni -pos2 -stem -stprm')
self.labels = [0, 1]
def _get_model(self):
"""Load model
This function is called during initialization
        Return: the trained action classification model
"""
m1 = load_model(self.modeldir + '/model')
return m1
def _get_feature_list(self):
"""Load feature file
This function is called during initialization
Return: Feature list
"""
with open(self.modeldir + '/features', 'r') as infile:
feature_list = pickle.load(infile)
return feature_list
def _convert_query_to_dictionary(self, query):
"""Convert each user query to the format required by LibLINEAR
Args and Need:
query: the raw query, like 'What do people think of ?'
self.feature_list: a list of unique features generated by function feature_generator
Return:
Convert user's query: store information in a dictionary,
which is a member of a list.
"""
features = feature_generator(query, self.stopwords, self.feature_arg)
onerow = {}
for f in features:
try:
onerow[self.feature_list.index(f) + 1] = 1
except ValueError:
pass
return [onerow]
def classify(self, query):
"""Does query classification, which decides which action need to be taken
This function is called by self.action_info
Return: Action id
"""
x = self._convert_query_to_dictionary(query)
p_label, p_val = predict(self.labels, x, self._model, '-b 0')
return int(p_label[0]) | apache-2.0 |
pompiduskus/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, notice that glmnet divides it
    # by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
BerryAI/Acai | OpenMRS/test_game_data/test_cf_hf_gd_game.py | 1 | 1377 | """
test_cf_hf_gd_game.py
~~~
This module contains a testing function for the SVD method used to discover
hidden features in the collaborative filtering method.
In this testing file, we generate the rating matrix from the 1k-user playing
history of songs in the Million Song Dataset. Because there are many
mismatches between the two data sources, we only generate the rating matrix
for tracks in MSD that are played in the 1k-user dataset. We then use the
Gradient Descent method to discover the hidden features in the CF method.
:author: Alexander Z Wang
"""
import numpy
import sys
import time
sys.path.append('../cf')
sys.path.append('../read_game_data')
import matplotlib.pyplot as plt
import acai_game as ag
import cf_hidden_feature as ch
k = 5
lean_rate = 0.001
lambda_rate = 0.04
max_iter = 10000
GD_method = 1
user_rate_dict = ag.get_user_rate_dict()
user_weight, hidden_feature, res_norm = ch.get_hidden_feature_matrix_GD(
user_rate_dict, k, lean_rate, lambda_rate, max_iter, GD_method)
predict_matrix = user_weight.dot(hidden_feature.T)
print predict_matrix.shape
print res_norm[-1]
print "hidden features of 10 songs"
print hidden_feature[0:10, :]
hist, bin_edges = numpy.histogram(hidden_feature, bins=20)
print hist
print bin_edges
# Plot convergence
# plt.plot(res_norm)
# plt.ylabel('Norm of Error')
# plt.xlabel('Iteration Steps')
# plt.show()
| mit |
rafael-radkowski/ME325 | FatigueDiagram_General.py | 1 | 18240 |
"""Failure theories for ductile materials - principal stress example
# -*- coding: utf-8 -*-
This module provides an example explaining the fatigue (constant-life) diagram for
ductile materials under dynamic loading. The user can set the material strengths and
the alternating and mean stress components of the load case.
The class shows a plot with the failure envelopes and the load point.
Implemented criteria are:
- Modified Goodman line
- Gerber line
- Soderberg line
User input:
- Ultimate strength Sut, yield strength Sy, endurance limit Se
- Alternating stress Sa and mean stress Sm
Press 'e' for manual input.
Example:
root = Tk()
root.geometry("900x720+300+300")
app = FatigueDiagram_General()
root.mainloop()
or from a console.
$ python FatigueDiagram_General.py
Note that this script was developed and tested with Python 3.5.3
Attributes:
-
Todo:
*
Rafael Radkowski
Iowa State University
Dec. 28, 2018
[email protected]
All rights reserved.
"""
# matplotlib
import platform
import matplotlib
if platform.system() == 'Darwin':
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# tkinter for the display
from tkinter import *
from tkinter import Canvas, messagebox
from tkinter import Tk, BOTH, W, N, E, S
from tkinter.ttk import Frame, Button, Label, Scale, Checkbutton, Combobox
import tkinter as tk
# for images
from PIL import Image, ImageTk
from ME325Common.UnitConversion import *
from ME325Common.METypes import *
from ME325Common.DynamicLoadTheories import *
from ME325Common.InputHelpers import *
class FatigueDiagram_General(Frame):
"""
    This class implements an interactive fatigue diagram example using the
    modified Goodman, Gerber, and Soderberg criteria. See documentation.
"""
# limits
limit_stress = 1000 # in MPa or N/mm2. Needs to be converted into ksi
# default values
default_Sut = 110
default_Sy = 90
default_Se = 35
default_Sa = 20
default_Sm = 30
# canvas
__canvas = 0
# The plot
__the_plot = None
slider_length = 200
# dicts
__labels = {}
__sliders = {}
__output = {}
__checkboxes = {}
__results = {}
# variables
__var = {}
# checkbox
__checkbox_helpers = 0
__combo = 0
__units = ["SI", "USCS"]
__unit_str = "(N/mm^2)"
#material
__mat = None
# recursion stop
__rec = True
#submenu
menu = None
entry_items = ["Sut", "Sy", "Se", "Sa", "Sm"]
def __init__(self):
super().__init__()
self.__mat = DMaterialData(110, 95, 35)
# the plot
self.__the_plot = FatigueDiagramPlot()
# init ui
self.initUI()
# menu
self.menu = DataEntryMenu(self.master, self.manual_entry_callback)
# update ui
self.update_values(0.0)
# ----------- Update the outputs ----------
def update_plot(self):
"""
Update the plot area
:return:
"""
self.__mat.Sut = self.__var["Sut"].get()
self.__mat.Sy = self.__var["Sy"].get()
self.__mat.Se = self.__var["Se"].get()
Sa = self.__var["Sa"].get()
Sm = self.__var["Sm"].get()
# Update the plot
self.__the_plot.update_plot(Sa, Sm, self.__mat)
self.__canvas.draw_idle()
#self.menu.update_plot(s1, s2, a1)
def update_output_display(self):
"""
        Update the output display, i.e. the stresses and factors of safety this
        panel shows.
:return:
"""
self.__var["n_Goodman"].set(str(round(self.__results["n_Goodman"],2)))
self.__var["n_Sonderberg"].set(str(round(self.__results["n_Sonderberg"], 2)))
self.__var["n_Gerber"].set(str(round(self.__results["n_Gerber"], 2)))
return
# ---------Widget callbacks ---------------
def update_values(self, val):
"""
Update function for all widgets. The function updates the
        input values. Note that all slider widgets call this function.
:param val: The value the widget passes
:return: -
"""
# stops recursion when updating the slider at a different location
if self.__rec == False:
return
self.__update_slider("Sut", 2)
self.__update_slider("Sy", 2)
self.__update_slider("Se", 2)
self.__update_slider("Sa", 2)
self.__update_slider("Sm", 2)
Sut = self.__var["Sut"].get()
Sy = self.__var["Sy"].get()
Se = self.__var["Se"].get()
Sa = self.__var["Sa"].get()
Sm = self.__var["Sm"].get()
# check for limits
if Sut < Sy:
self.__sliders["Sut"].set( Sy + 1)
if Sy < Se:
self.__sliders["Sy"].set(Se + 1)
# calc results
self.__results["n_Goodman"] = FatigueDiagram.calc_mod_Goodman_FoS(Sa, Sm, Se, Sut )
self.__results["n_Sonderberg"] = FatigueDiagram.calc_Sonderberg_FoS(Sa, Sm, Se, Sy)
self.__results["n_Gerber"] = FatigueDiagram.calc_Gerber_FoS(Sa, Sm, Se, Sut)
self.update_plot()
self.update_output_display()
def cb_update(self):
"""
        Checkbox update. Captures the checkbox clicks.
Checkboxes do not pass any arguments to the function
:return:
"""
#self.failure_theory_plts.showVonMisesPlt(int(self.cb_mises.get()))
#self.failure_theory_plts.showTrescaPlt(int(self.cb_tresca.get()))
if self.__checkbox_helpers.get() == 1:
self.__show_helpers = True
else:
self.__show_helpers = False
self.__the_plot.set_helpers(self.__show_helpers)
self.__canvas.draw_idle()
#self.update_values(0)
def cb2_update(self):
visible = True
if self.__checkboxes["Goodman"].get() != 1:
visible = False
self.__the_plot.set_visible(0, visible)
visible = True
if self.__checkboxes["Gerber"].get() != 1:
visible = False
self.__the_plot.set_visible(1, visible)
visible = True
if self.__checkboxes["Sonderberg"].get() != 1:
visible = False
self.__the_plot.set_visible(2, visible)
visible = True
if self.__checkboxes["Yield"].get() != 1:
visible = False
self.__the_plot.set_visible(3, visible)
self.__canvas.draw_idle()
def key_callback(self, event):
"""
Create a subwindow to allow for user input
:param event:
:return:
"""
if event.char == 'e':
self.create_subUI()
elif event.char == 'd':
return
def manual_entry_callback(self):
"""
Apply the values that the user set in the sub window
:return:
"""
try:
d = self.menu.get()
self.__sliders["Sut"].set(float(d[self.entry_items[0]].get()))
self.__sliders["Sy"].set(float(d[self.entry_items[1]].get()))
self.__sliders["Se"].set(float(d[self.entry_items[2]].get()))
self.__sliders["Sa"].set(float(d[self.entry_items[3]].get()))
self.__sliders["Sm"].set(float(d[self.entry_items[4]].get()))
# get values
self.update_values(0)
except ValueError:
print("Something went wrong - invalid numbers")
except KeyError:
print("Something went wrong - wrong key")
def __combobox_callback(self, event):
c = self.__combo.get()
self.__change_unit(c)
self.update()
def __save_plot(self):
try:
p = self.__the_plot.save_plot()
messagebox.showinfo("Save Plot", str("Saved the plot as: " + p))
except:
pass
try:
self.menu.save_plot()
except:
pass
def __change_unit(self, unit):
self.__rec = False
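        # Conversion note: 1 ksi = 1000 psi, which is why the psi-based helpers
        # from ME325Common.UnitConversion are scaled by a factor of 1000 when
        # mapping between N/mm^2 and ksi below.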
if unit == self.__units[0]: # change to si units
self.__unit_str = "(N/mm^2)"
self.__the_plot.set_units(0)
self.__sliders["Sut"].configure(from_=0, to=self.limit_stress)
self.__sliders["Sy"].configure(from_=0, to=self.limit_stress)
self.__sliders["Se"].configure(from_=0, to=self.limit_stress)
self.__sliders["Sa"].configure(from_=0, to=self.limit_stress)
self.__sliders["Sm"].configure(from_=0, to=self.limit_stress)
self.__sliders["Sut"].set(UnitConversion.psi_to_Nmm2(self.__var["Sut"].get()) * 1000)
self.__sliders["Sy"].set(UnitConversion.psi_to_Nmm2(self.__var["Sy"].get()) * 1000)
self.__sliders["Se"].set(UnitConversion.psi_to_Nmm2(self.__var["Se"].get()) * 1000)
self.__sliders["Sa"].set(UnitConversion.psi_to_Nmm2(self.__var["Sa"].get()) * 1000)
self.__sliders["Sm"].set(UnitConversion.psi_to_Nmm2(self.__var["Sm"].get()) * 1000)
else: # change to uscs units
self.__unit_str = "(ksi)"
self.__the_plot.set_units(1)
self.__sliders["Sut"].configure(from_=0, to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["Sut"].set(UnitConversion.Nmm2_to_psi(self.__var["Sut"].get()) / 1000)
self.__sliders["Sy"].configure(from_=0,
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["Sy"].set(UnitConversion.Nmm2_to_psi(self.__var["Sy"].get()) / 1000)
self.__sliders["Se"].configure(from_=0,
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["Se"].set(UnitConversion.Nmm2_to_psi(self.__var["Se"].get()) / 1000)
self.__sliders["Sa"].configure(from_=0,
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["Sa"].set(UnitConversion.Nmm2_to_psi(self.__var["Sa"].get()) / 1000)
self.__sliders["Sm"].configure(from_=0,
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["Sm"].set(UnitConversion.Nmm2_to_psi(self.__var["Sm"].get()) / 1000)
self.__labels["Sut"].configure(text=str("Sut " + self.__unit_str + ":"))
self.__labels["Sy"].configure(text=str("Sy " + self.__unit_str + ":"))
self.__labels["Se"].configure(text=str("Se " + self.__unit_str + ":"))
self.__labels["Sa"].configure(text=str("Sa " + self.__unit_str + ":"))
self.__labels["Sm"].configure(text=str("Sm " + self.__unit_str + ":"))
self.__canvas.draw_idle()
self.__rec = True
self.update_values(0)
# ------------ Inits ---------------
def create_subUI(self):
"""
Create a window that allows a user to manually enter all the values
instead of using sliders
:return:
"""
try:
self.menu.create("Enter data", self.entry_items)
d = {self.entry_items[0]: self.__var["Sut"].get(),
self.entry_items[1]: self.__var["Sy"].get(),
self.entry_items[2]: self.__var["Se"].get(),
self.entry_items[3]: self.__var["Sa"].get(),
self.entry_items[4]: self.__var["Sa"].get()}
self.menu.set(d)
except ValueError:
print("Something went wrong")
def create_plot(self):
"""
Create the plot that shows the failure theories
:return:
"""
fig = self.__the_plot.create_plot(8) # 9 -> figure size
self.__canvas = FigureCanvasTkAgg(fig, master=self)
self.__canvas.draw()
def initUI(self):
"""
Init the user interface and all widgets
:return: -
"""
rows_for_plot = 24
cols_for_plot = 5
output_row_start = 14
self.master.title("ME 325 Machine Component Design")
self.pack(fill=BOTH, expand=True)
# keyboard binding
self.master.bind("e", self.key_callback)
self.master.bind("d", self.key_callback)
self.columnconfigure(0, weight=1) # first and last column can expand
self.columnconfigure(0, pad=7)
self.rowconfigure(rows_for_plot, weight=1)
self.rowconfigure(rows_for_plot, pad=7)
lbl = Label(self, text="Mohr's Circle for plane materials")
lbl.grid(sticky=W, pady=4, padx=5)
self.__canvas = Canvas(self, width=300, height=300)
self.create_plot()
self.__canvas.get_tk_widget().grid(row=1, column=0, columnspan=cols_for_plot, rowspan=rows_for_plot,
padx=5, sticky=E + W + S + N)
Label(self, text="Input:",font='Helvetica 14 bold').grid(sticky=NW, row=1, column=cols_for_plot+1)
#-----------
# INPUT
self.__add_slider("Sut", self.default_Sut, 0, self.limit_stress, 2, cols_for_plot)
self.__add_slider("Sy", self.default_Sy, 0, self.limit_stress, 4, cols_for_plot)
self.__add_slider("Se", self.default_Se, 0, self.limit_stress, 6, cols_for_plot)
self.__add_slider("Sa", self.default_Sa, 0, self.limit_stress, 8, cols_for_plot)
self.__add_slider("Sm", self.default_Sm, 0, self.limit_stress, 10, cols_for_plot)
##---------------------------------------------------------------------------------
# OUTPUT
Label(self, text="Output:",font='Helvetica 14 bold').grid(sticky=NW, row=output_row_start, column=cols_for_plot+1)
# s1
self.__add_label("n_Goodman", "n_Goodman", 0.0, output_row_start, cols_for_plot)
self.__add_label("n_Gerber", "n_Gerber", 0.0, output_row_start+1, cols_for_plot)
self.__add_label("n_Sonderberg", "n_Sonderberg", 0.0, output_row_start+2, cols_for_plot)
##---------------------------------------------------------------------------------
# Display
Label(self, text="Display:", font='Helvetica 14 bold').grid(sticky=NW, row=output_row_start+5,
column=cols_for_plot + 1)
self.__add_checkbox("Goodman", "Goodman line", output_row_start + 6, cols_for_plot + 1, self.cb2_update)
self.__add_checkbox("Gerber", "Gerber line" , output_row_start+7, cols_for_plot + 1, self.cb2_update)
self.__add_checkbox("Sonderberg", "Sonderberg line", output_row_start + 8, cols_for_plot + 1, self.cb2_update)
self.__add_checkbox("Yield", "Yield stress", output_row_start + 9, cols_for_plot + 1, self.cb2_update)
#Label(self, text="press 'd' for details ",font='Helvetica 12').grid(sticky=NW, row=output_row_start + 6,
# column=cols_for_plot+1, columnspan=2)
##---------------------------------------------------------------------------------
# Others
cbtn = Button(self, text="Exit", command=self.quit)
cbtn.grid(row=rows_for_plot+1, column=cols_for_plot+2, pady=4, sticky=E)
Button(self, text="Save", command=self.__save_plot).grid(row=rows_for_plot + 1, column=cols_for_plot+1,
pady=4, sticky=W)
self.__checkbox_helpers = IntVar()
self.__checkbox_helpers.set(1)
check1 = Checkbutton(self, text="Helpers", variable=self.__checkbox_helpers, command=self.cb_update)
check1.grid(row=rows_for_plot+1, column=cols_for_plot-1, sticky=W)
Label(self, text="press 'e' for manual input.", font='Helvetica 12').grid(row=rows_for_plot+1, column=0, sticky=W, padx=7)
self.__combo = Combobox(self, values=self.__units, state='readonly', width=6)
self.__combo.grid(sticky=W, row=rows_for_plot+1, column=cols_for_plot-4, columnspan=1, padx=1, pady=0)
self.__combo.current(0)
self.__combo.bind("<<ComboboxSelected>>", self.__combobox_callback)
def __add_slider(self, name, default_value, min, max, row_, cols_):
self.__labels[name] = Label(self, text=str(name + " " + self.__unit_str + ":"), width=15)
self.__labels[name].grid(sticky=W, row=row_, column=cols_ + 1)
self.__var[str(name + "_str")] = StringVar()
self.__var[str(name + "_str")].set(str(default_value))
self.__var[name] = DoubleVar()
self.__var[name].set(default_value)
self.__output[name] = Label(self, textvariable=self.__var[str(name + "_str")])
self.__output[name].grid(sticky=W, row=row_, column=cols_ + 2)
self.__sliders[name] = Scale(self, value=default_value, from_=min,
to=max, orient=HORIZONTAL,
length=self.slider_length, command=self.update_values)
self.__sliders[name].grid(sticky=W, row=row_+1, column=cols_ + 1, columnspan=2)
def __update_slider(self, name, round_):
try:
# update value
self.__var[name].set(round(self.__sliders[name].get(), round_))
self.__var[str(name + "_str")].set(str(self.__var[name].get()))
except RecursionError:
pass
def __add_label(self, name, text_, default_value, row_, col_ ):
self.__labels[name] = Label(self, text=str(text_ + " " + self.__unit_str + ":"))
self.__labels[name].grid(sticky=W, row=row_ + 1, column=col_ + 1)
self.__var[name] = StringVar()
self.__var[name].set(str(default_value))
self.__output[name] = Label(self, textvariable=self.__var[name])
self.__output[name].grid(sticky=W, row=row_ + 1, column=col_ + 2)
def __add_checkbox(self, name_, text_ , row_, col_, callback_):
self.__checkboxes[name_] = IntVar()
self.__checkboxes[name_].set(1)
check1 = Checkbutton(self, text=text_, variable=self.__checkboxes[name_], command=callback_)
check1.grid(row=row_, column=col_, sticky=W)
def main():
root = Tk()
root.geometry("900x720+300+300")
app = FatigueDiagram_General()
# To jump the window to the front
root.attributes("-topmost", True)
root.after_idle(root.attributes, '-topmost', False)
# run
root.mainloop()
if __name__ == '__main__':
main() | mit |
meduz/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 78 | 6016 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
from sklearn.ensemble.gradient_boosting import QuantileLossFunction
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
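# Quick numeric check of the scale-invariance claim made in the docstring
# (illustrative only): pairwise_distances([[1., 2.]], [[2., 4.]],
# metric="cosine") is ~0 because the two vectors are proportional, while their
# euclidean and cityblock distances are clearly non-zero.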
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
mkdubik/multinet-evaluation | plot/t3.py | 1 | 1050 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import listdir
from pdb import set_trace
import matplotlib.pyplot as plt
from t1 import process_memory_log, collect_data
def main():
plt.figure(1)
data = collect_data('telemetry/t3/')
x = sorted(data[data.keys()[0]].keys())
def plot(data, method, what):
y = data[method]
y = [y[k][what] for k in sorted(y.keys())]
a1, = plt.plot(x, y, '-', linewidth=2, label = method)
return a1
a = plot(data, 'glouvain', 0)
c = plot(data, 'pmm', 0)
plt.xlabel('Actors (thousand)', fontsize = 14)
plt.ylabel('computational time (s)', fontsize = 14)
plt.legend(handles = [a, c], loc = 2, title = 'L = 3, $\mu$ = 0.3')
plt.xticks(x)
plt.savefig('t3a')
plt.figure(2)
a = plot(data, 'glouvain', 1)
c = plot(data, 'pmm', 1)
plt.xlabel('Actors (thousand)', fontsize = 14)
plt.ylabel('peak memory usage (megabytes)', fontsize = 14)
plt.legend(handles = [a, c], loc = 2, title = 'L = 3, $\mu$ = 0.3')
plt.xticks(x)
plt.savefig('t3b')
if __name__ == "__main__":
main()
| gpl-2.0 |
Jeff20/scipy_2015_sklearn_tutorial | notebooks/helpers.py | 19 | 5046 | import numpy as np
from collections import defaultdict
import os
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.feature_extraction import DictVectorizer
# Can also use pandas!
def process_titanic_line(line):
# Split line on "," to get fields without comma confusion
vals = line.strip().split('",')
# replace spurious " characters
vals = [v.replace('"', '') for v in vals]
pclass = int(vals[0])
survived = int(vals[1])
name = str(vals[2])
sex = str(vals[3])
try:
age = float(vals[4])
except ValueError:
# Blank age
age = -1
sibsp = float(vals[5])
parch = int(vals[6])
ticket = str(vals[7])
try:
fare = float(vals[8])
except ValueError:
# Blank fare
fare = -1
cabin = str(vals[9])
embarked = str(vals[10])
boat = str(vals[11])
homedest = str(vals[12])
line_dict = {'pclass': pclass, 'survived': survived, 'name': name, 'sex': sex, 'age': age, 'sibsp': sibsp,
'parch': parch, 'ticket': ticket, 'fare': fare, 'cabin': cabin, 'embarked': embarked,
'boat': boat, 'homedest': homedest}
return line_dict
def load_titanic(test_size=.25, feature_skip_tuple=(), random_state=1999):
f = open(os.path.join('datasets', 'titanic', 'titanic3.csv'))
# Remove . from home.dest, split on quotes because some fields have commas
keys = f.readline().strip().replace('.', '').split('","')
lines = f.readlines()
f.close()
string_keys = ['name', 'sex', 'ticket', 'cabin', 'embarked', 'boat',
'homedest']
string_keys = [s for s in string_keys if s not in feature_skip_tuple]
numeric_keys = ['pclass', 'age', 'sibsp', 'parch', 'fare']
numeric_keys = [n for n in numeric_keys if n not in feature_skip_tuple]
train_vectorizer_list = []
test_vectorizer_list = []
n_samples = len(lines)
numeric_data = np.zeros((n_samples, len(numeric_keys)))
numeric_labels = np.zeros((n_samples,), dtype=int)
# Doing this twice is horribly inefficient but the file is small...
for n, l in enumerate(lines):
line_dict = process_titanic_line(l)
strings = {k: line_dict[k] for k in string_keys}
numeric_labels[n] = line_dict["survived"]
sss = StratifiedShuffleSplit(numeric_labels, n_iter=1, test_size=test_size,
random_state=12)
# This is a weird way to get the indices but it works
train_idx = None
test_idx = None
for train_idx, test_idx in sss:
pass
for n, l in enumerate(lines):
line_dict = process_titanic_line(l)
strings = {k: line_dict[k] for k in string_keys}
if n in train_idx:
train_vectorizer_list.append(strings)
else:
test_vectorizer_list.append(strings)
numeric_data[n] = np.asarray([line_dict[k]
for k in numeric_keys])
train_numeric = numeric_data[train_idx]
test_numeric = numeric_data[test_idx]
train_labels = numeric_labels[train_idx]
test_labels = numeric_labels[test_idx]
vec = DictVectorizer()
# .toarray() due to returning a scipy sparse array
train_categorical = vec.fit_transform(train_vectorizer_list).toarray()
test_categorical = vec.transform(test_vectorizer_list).toarray()
train_data = np.concatenate([train_numeric, train_categorical], axis=1)
test_data = np.concatenate([test_numeric, test_categorical], axis=1)
keys = numeric_keys + string_keys
return keys, train_data, test_data, train_labels, test_labels
FIELDNAMES = ('polarity', 'id', 'date', 'query', 'author', 'text')
def read_sentiment_csv(csv_file, fieldnames=FIELDNAMES, max_count=None,
n_partitions=1, partition_id=0):
import csv # put the import inside for use in IPython.parallel
def file_opener(csv_file):
try:
open(csv_file, 'r', encoding="latin1").close()
return open(csv_file, 'r', encoding="latin1")
except TypeError:
# Python 2 does not have encoding arg
return open(csv_file, 'rb')
texts = []
targets = []
with file_opener(csv_file) as f:
reader = csv.DictReader(f, fieldnames=fieldnames,
delimiter=',', quotechar='"')
pos_count, neg_count = 0, 0
for i, d in enumerate(reader):
if i % n_partitions != partition_id:
# Skip entry if not in the requested partition
continue
if d['polarity'] == '4':
if max_count and pos_count >= max_count / 2:
continue
pos_count += 1
texts.append(d['text'])
targets.append(1)
elif d['polarity'] == '0':
if max_count and neg_count >= max_count / 2:
continue
neg_count += 1
texts.append(d['text'])
targets.append(-1)
return texts, targets
| cc0-1.0 |
giffordw/astrofuncs | findpotential/__init__.py | 1 | 11153 | ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
NUMERICAL AND NFW MASS/DENSITY/POTENTIAL/V_ESC CALCULATIONS
Description: The main function to call is find_potential(). There are several other
supporting functions which can be called as well.
find_potential(gal_data, clus_data, red_z, Rvir):
gal_data -- A (Nx6) array with the column order as: gal_x,gal_y,gal_z,gal_vx,gal_vy,gal_vz.
        The positions are assumed to be in [Mpc] and the velocities in [km/s]
    clus_data -- A 6 element array with order: clus_x,clus_y,clus_z,clus_vx,clus_vy,clus_vz.
        The positions are assumed to be in [Mpc] and the velocities in [km/s]
    red_z -- The cluster redshift
    Rvir -- The virial radius or critical radius (r200). Assumed to be in [Mpc]
Outputs: The program calculates the cumulative mass profile, density profile,
    potential profile, and escape velocity for each cluster. There are no file
    outputs; however, the program creates two plots that can be saved. The first is
the cumulative/NFW mass profiles and the second is the phase space plot with the
various escape velocity solutions overplotted.
Author: Dan Gifford
Year: 2014
Institution: University of Michigan
Email: [email protected]
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from __future__ import division
from math import *
import astropy.units as u
import astropy.constants as const
import matplotlib.pyplot as plt
import astropy
import numpy as np
import scipy.optimize as optimize
from astropy.io import ascii
from scipy import ndimage
from astropy.io import fits as pyfits
from scipy.integrate import cumtrapz
#####CONSTANTS#####
h = 0.73 #km/s/Mpc/100.0
rmax = 10 #Mpc
G =4.518e-48 #Mpc^3 / (Msol s2)
bin_size = 0.01/h #Mpc
def NFW_cumulative_mass(r,rho_0,r_0):
'''returns NFW cumulative mass integrated to infinity (it converges)'''
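    # This closed form follows from integrating the NFW density profile
    #   rho(r) = rho_0 / ((r / r_0) * (1 + r / r_0)**2)
    # over spheres, i.e. M(<r) = 4*pi * integral_0^r rho(r') r'**2 dr'.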
D = r / r_0
return 4 * np.pi * rho_0 * (r_0**3) * ( np.log(1+D) - ( D / (1+D) ) )
def fit_NFW(radii,cummass,rfit,p0):
'''uses scipy.optimize.curve_fit to find solution. Takes the rgrid,
cumulative mass profile, rcrit radius, and parameter guesses [rho_0,r_0]
'''
#select indices within rfit (should be the virial radius)
vir, = np.where(radii <= rfit)
NFW_array,pcov = optimize.curve_fit(NFW_cumulative_mass,radii[vir],cummass[vir],\
p0=p0)
return NFW_array,pcov
def density(r,mass):
'''Takes radial grid and cumulative mass profile and returns a density profile in Msol/Mpc^3'''
dens = np.zeros(r.size)
for ii in range(dens.size):
        # Innermost bin: the enclosed volume is a full sphere of radius r[0];
        # using r[ii-1] here would wrap around to the outermost radius.
        if ii == 0: dens[ii] = mass[ii]/(4/3.0*np.pi*r[ii]**3)
else:
dens[ii] = (mass[ii]-mass[ii-1])/(4/3.0*np.pi*(r[ii]**3-r[ii-1]**3))
return dens
def numerical_potential(r,dens):
'''Integrates the density profile to solve for the potential profile. This is the 2 integral method.
Returned units are in Mpc^2/s^2
'''
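    # The two integrals referred to above approximate
    #   phi(r) = -4*pi*G * [ (1/r) * integral_0^r    rho(r') r'**2 dr'
    #                        +       integral_r^rmax rho(r') r'    dr' ],
    # both evaluated with cumulative trapezoidal rules on the radial grid.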
deriv1 = dens*r**2
deriv2 = dens*r
inner = cumtrapz(deriv1,r)
outer = -cumtrapz(deriv2[::-1],r[::-1])
return -4*np.pi*G*(1.0/r[1:-1]*inner[:-1] + outer[::-1][1:])
def phi_N(r,rho_0,r_0):
'''Returns NFW potential integrated to infinity (it converges). Returned units are in Mpc^2/s^2'''
D = r / r_0
return (-4*np.pi*G*rho_0*r_0**2*((np.log(1+D))/D))
class find_potential:
def __init__(self,gal_data,clus_data,red_z=None,Rvir=None):
'''Main Program. See main doc for details'''
#####TESTING#####
try:
assert gal_data.shape[1] == 6
except:
raise Exception, 'Your galaxy array has shape {0}x{1} and needs to be {0}x6. Please reshape!'.format(gal_data.shape[0],gal_data.shape[1])
try:
assert clus_data.size == 6
except:
raise Exception, 'Oops! You have not fed the function the correct number of cluster values.'
if red_z == None: raise Exception, 'Please pass the function a cluster redshift!'
if Rvir == None: raise Exception, 'Please pass the function a cluster virial/critical radius!'
#################
GAL_R = np.sqrt((gal_data[:,0]-clus_data[0])**2 + (gal_data[:,1]-clus_data[1])**2 + (gal_data[:,2]-clus_data[2])**2) #3D galaxy radial distances
GAL_V = np.sqrt((gal_data[:,3]-clus_data[3])**2 + (gal_data[:,4]-clus_data[4])**2 + (gal_data[:,5]-clus_data[5])**2) #3D galaxy peculiar velocities
Mpcbin = np.where(GAL_R <= rmax) #identify indices of particles within 'rmax' of cluster.
crit = 2.774946e11*h*h*(0.75+0.25/(1+red_z)**3) #critical density of universe in solar masses/Mpc^3
av_dens = (0.25/(1+red_z)**3)*crit #average density of universe
#filter particles to within 'Mpcbin' range
self.GAL_R_3Mpc = GAL_R[Mpcbin]
self.GAL_V_3Mpc = GAL_V[Mpcbin]
self.radial = gal_data[:,3][Mpcbin]*(gal_data[:,0][Mpcbin]/self.GAL_R_3Mpc) + gal_data[:,4][Mpcbin]*(gal_data[:,1][Mpcbin]/self.GAL_R_3Mpc) + gal_data[:,5][Mpcbin]*(gal_data[:,2][Mpcbin]/self.GAL_R_3Mpc) #radial velocity in km/s
number_of_particles_in_bin,bin_edges = np.histogram(self.GAL_R_3Mpc,np.arange(0,rmax,bin_size)) #bin particles by radial distance
self.rbin = (bin_edges[1:]+bin_edges[:-1])/2.0 #the grid of r-values associated with each radial bin
#### calculate cumulative mass profile (M<r):
particle_mass = 8.6e8/h #Millennium particle mass
self.cumulative_mass_profile = particle_mass * np.cumsum(number_of_particles_in_bin)# solar masses
'''plot cumulative mass profile M(<r)'''
plt.plot(self.rbin, self.cumulative_mass_profile)
############ NFW fit #############
#parameter guesses:
self.rho_0_guess = 5e14 # Msol / Mpc3
self.r_0_guess = 0.4 #Mpc
NFW_array,pcov = fit_NFW(self.rbin,self.cumulative_mass_profile,Rvir,[self.rho_0_guess,self.r_0_guess])
#first element of optimization curve fit output array. units: Msol/[Mpc]^3
self.rho_0 = NFW_array[0] #* u.Msol * u.Mpc**-3
#second element of optimization curve fit output array. units: [Mpc]
self.r_0 = NFW_array[1] #* u.Mpc
print 'normalization: {0:.3e} Msol/Mpc^3 scale radius: {1:.3f} Mpc'.format(self.rho_0,self.r_0)
self.NFW_mass_profile = NFW_cumulative_mass(self.rbin,self.rho_0,self.r_0)
'''plot cumulative mass profile M(<r) FIT'''
plt.plot(self.rbin, self.NFW_mass_profile)
#plt.savefig('path_to_figs/'+str(i)+'profile.png')
plt.close()
#plt.show()
#Now numerically solve for density and potential profiles
self.dens_NFW = density(self.rbin,self.NFW_mass_profile) - av_dens #Msol/Mpc^3
self.dens = density(self.rbin,self.cumulative_mass_profile) - av_dens #Msol/Mpc^3
self.potential_numerical = numerical_potential(self.rbin,self.dens) #Mpc^2/s^2
self.potential_NFW_numerical = numerical_potential(self.rbin,self.dens_NFW) #Mpc^2/s^2
############# Escape Velocity profiles #############
#escape velocity profile
self.v_esc_NFW = np.sqrt(-2 * phi_N(self.rbin,self.rho_0,self.r_0))*3.086e19 #km/s
self.v_esc_NFW_numerical = np.sqrt(-2 * self.potential_NFW_numerical)*3.086e19 #km/s
self.v_esc_numerical = np.sqrt(-2 * self.potential_numerical)*3.086e19 #km/s
#hubble param escape calculation
q = 0.25/2.0 - 0.75
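        # q is the cosmological deceleration parameter for a flat universe with
        # Omega_m = 0.25 and Omega_Lambda = 0.75: q = Omega_m/2 - Omega_Lambda.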
H2 = (h*100*3.24e-20)**2
re = (G*self.cumulative_mass_profile[1:-1]/(-q*H2))**(1/3.0)
#re = (G*M200[i]*1e10/(-q*H2))**(1/3.0)
self.v_esc_hflow = np.sqrt(self.v_esc_numerical**2 + (q*(h*100)**2*(self.rbin[1:-1]**2)))# - 3*re**2))) #km/s
#Chris's GM/R + dm calc
base = -G*self.cumulative_mass_profile[self.rbin<=Rvir][-1]/Rvir
dm = -G*np.append(self.cumulative_mass_profile[0],self.cumulative_mass_profile[1:] - self.cumulative_mass_profile[:-1])/self.rbin
self.potential_chris = (base + dm)*(3.086e19)**2 + (q*(h*100)**2*(self.rbin**2)) #km^2/s^2
self.v_esc_chris = np.sqrt(-2*self.potential_chris) #km/s
#Chris's integral + dm calc
base = self.potential_numerical
dm = dm[1:-1]
self.potential_chris_tot = (base + dm)*(3.086e19)**2 + (q*(h*100)**2*(self.rbin[1:-1]**2)) #km^2/s^2
self.v_esc_chris_tot = np.sqrt(-2*self.potential_chris_tot) #km/s
if __name__ == '__main__':
#####Read in catalog data#####
redshift,rcrit,M200,veldisp,halox,haloy,haloz,halovx,halovy,halovz = np.loadtxt('/n/Christoq1/giffordw/Millenium/biglosclusters.csv',dtype='float',delimiter=',',usecols=(4,5,6,8,9,10,11,12,13,14),unpack=True)
fileID = np.loadtxt('/n/Christoq1/MILLENNIUM/100Halos/particles/cmiller.csv',dtype='string',delimiter=',',skiprows=1,usecols=(0,),unpack=True)
for i in range(1):#fileID.size): #Loop over range(N) clusters
IDh = fileID[i]
ID = np.loadtxt('/n/Christoq1/giffordw/Millenium/biglosclusters.csv',dtype='string',delimiter=',',usecols=(0,),unpack=True)
Rvir = (rcrit[ID==IDh]/(1+redshift[ID==IDh]))[0] / h # Mpc
fits = pyfits.open('/n/Christoq1/MILLENNIUM/100Halos/particles/t'+str(i)+'_cmiller.dat.fits') #open particle fits file
data = fits[1].data
gal_x = data.field('PPX')/h
gal_y = data.field('PPY')/h
gal_z = data.field('PPZ')/h
gal_vx = data.field('VVX')
gal_vy = data.field('VVY')
gal_vz = data.field('VVZ')
gal_ob = find_potential(np.column_stack((gal_x,gal_y,gal_z,gal_vx,gal_vy,gal_vz)),np.array([0.0,0.0,0.0,halovx[ID==IDh],halovy[ID==IDh],halovz[ID==IDh]]),redshift[ID==IDh][0],Rvir)
s,ax = plt.subplots(1,figsize=(17,10))
# plot up particles
ax.plot(gal_ob.GAL_R_3Mpc,gal_ob.radial,'ko',markersize=1,alpha=0.3)
ax.plot(gal_ob.rbin,gal_ob.v_esc_NFW,'b')# NFW escape velocity
ax.plot(gal_ob.rbin,-gal_ob.v_esc_NFW,'b')
ax.plot(gal_ob.rbin[1:-1],gal_ob.v_esc_NFW_numerical,'b--')# numerical NFW escape velocity
ax.plot(gal_ob.rbin[1:-1],-gal_ob.v_esc_NFW_numerical,'b--')
ax.plot(gal_ob.rbin[1:-1],gal_ob.v_esc_hflow,color='g')# numerical escape velocity
ax.plot(gal_ob.rbin[1:-1],-gal_ob.v_esc_hflow,color='g')
ax.plot(gal_ob.rbin,gal_ob.v_esc_chris,color='orange')# Chris escape velocity
ax.plot(gal_ob.rbin,-gal_ob.v_esc_chris,color='orange')
#format plot
ax.axvline(Rvir,color='k',ls='--',alpha=0.5)
ax.set_xlabel('r [Mpc]')
ax.set_ylabel('$ \sqrt{-2\phi_{N}}$ [km/s]',fontsize=13)
ax.set_xlim(0,3)#Rvir)
#plt.savefig('path_to_figs/'+str(i)+'phase.png')
#plt.show()
plt.close()
print 'DONE WITH CLUSTER {0}'.format(i)
| mit |
umn-earth-surface/IceFlow | examples/Iceland2017.py | 1 | 1597 | import iceflow
ic = iceflow.IceFlow()
self = ic
import numpy as np
import scipy
from scipy.io import loadmat
from scipy.sparse import linalg
import os
import sys
from matplotlib import pyplot as plt
ic.useGRASS = True
ic.location='Iceland'
ic.gisbase = '/usr/local/src/grass7_trunk/dist.x86_64-unknown-linux-gnu'
ic.run_length_years = 1000. # years
ic.t_start_years = 0. # years
ic.dt_years = 1
ic.record_frequency_years = 50
ic.elevation = 'elev'
ic.north = 792000
ic.south = 240000
ic.west = 108000
ic.east = 868000
ic.dx = 4000
ic.dy = 4000
"""
ic.mass_balance_parameterization = 'TP_PDD'
# Goes to Ta variable
ic.temperature_melt_season = 'TwentiethCenturyTemp_NCAR_CIRES_JJA_degC'
ic.melt_season_length_days = 120
#self.Pa /= 1000. / self.secyr # e.g., PRISM is in mm/yr, change to m/s.
# CONVERT THIS IN GRASS BEFOREHAND!!! <----------------------------------------
# Goes to Pair variable
ic.precipitation_solid = 'TwentiethCenturyPrecip_NCAR_CIRES_annual'
ic.T_correction = -5
ic.P_factor = 1
ic.b_maximum_per_year = 1 # [m/yr]
"""
ic.mass_balance_parameterization = 'ELA'
ic.ELA = 800
ic.dbdz_per_year = 1E-3 # 1 m/yr over 1 km -- EDIT THIS TO FIT DATA!
ic.b_maximum_per_year = .3 # [m/yr]
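# Note (assumption, not taken from the iceflow source): with the 'ELA' option
# the mass balance is typically of the form
#   b(z) ~ min(dbdz_per_year * (z - ELA), b_maximum_per_year)  [m/yr];
# the exact parameterization is defined inside the iceflow package rather than
# in this example script.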
# Set this up to automatically number IceFlow outputs using glob
ic.output_filename=None
ic.output_figure=None
ic.plot_at_end_flag=False
ic.plot_during_run_flag = True
#ic.plot_t_years = ic.run_length_years
ic.boundary_condition = 'Dirichlet0'
ic.GRASS_raster_ice_extent = 'FakeMeasuredExtents'
ic.verbose = False
# Flexure
ic.isostatic = False
ic.initialize()
ic.run()
ic.finalize()
| gpl-3.0 |
lucadealfaro/crowdranker | modules/rank.py | 1 | 31858 | # -*- coding: utf-8 -*-
import numpy as np
import random
#import matplotlib.pyplot as plt
import time
import math
class Cost:
""" Class contains cost function.
"""
def __init__(self, cost_type='top-k', rank_cost_coefficient=-1):
self.cost_type = cost_type
self.rank_cost_coefficient = rank_cost_coefficient
def calculate(self, i, k, id2rank):
# ranking starts from 0, so first k rank are 0, 1, ..., k - 1
if self.cost_type == 'top-k':
if id2rank[i] < k:
return 1
else:
return 0
elif self.cost_type == 'one_over_rank':
return 1./( 1 + id2rank[i])
elif self.cost_type == 'rank_power_alpha':
if self.rank_cost_coefficient == 0:
raise Exception("If coefficient is zero then cost object should be None!")
return (1 + id2rank[i]) ** self.rank_cost_coefficient
elif self.cost_type == 'two_steps':
if id2rank[i] < k:
return 1
if id2rank[i] < 3 * k / 2:
return 0.5
return 0
elif self.cost_type == 'piecewise':
a = 0.25
x = id2rank[i]
if x < k:
return (-1) * a / k * x + 1 + a
if x < 2 * k:
return - 1. / k * x + 2
return 0
elif self.cost_type == 'smooth-top-k':
beta = 2
return 1.0 / (1 + (id2rank[i]/float(k)) ** beta)
else:
            raise Exception('Cost function type is not specified')
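# Illustrative usage of Cost (added note): Cost('top-k').calculate(i, k, id2rank)
# returns 1 when item i is currently ranked within the top k and 0 otherwise;
# the other cost_type options smooth or reshape that reward as a function of rank.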
class Rank:
""" Class contains methods for ranking items based on items comparison.
"""
def __init__(self, items, alpha=0.9, num_bins=2001,
cost_obj=None, k=None, init_dist_type='gauss'):
"""
Arguments:
        - items is a list of original items id.
        - alpha is the annealing coefficient for distribution update.
        - num_bins is the number of histogram bins.
        - cost_obj is an object of type Cost; in other words it is a reward
        function. If cost_obj is None then we don't use any
        reward function and treat each item equally.
        - init_dist_type is the type of distribution we use to initialize the
        quality distributions
"""
# items are indexed by 0, 1, ..., num_items - 1 in the class but
# "outside" they have ids from orig_items_id, so orig_items_id[n]
# is original id of item n.
self.orig_items_id = items
num_items = len(items)
self.num_items = num_items
self.num_bins = num_bins
self.cost_obj = cost_obj
self.alpha = alpha
# Constant k is for top-k problems.
self.k = k
# qdistr is numpy two dimensional array which represents quality
# distribution, i-th row is a distribution for an item with id equals i.
# qdistr is initialized as uniform distribution.
if init_dist_type == 'unif':
self.qdistr = np.zeros((num_items ,num_bins)) + 1./num_bins
elif init_dist_type == 'gauss':
# Does a Gaussian distribution centered in the center.
#print num_items, num_bins
x, y = np.mgrid[0:num_items, 0:num_bins]
self.qdistr = np.zeros((self.num_items, self.num_bins))
for i in xrange(self.num_items):
self.qdistr[i, :] = self.get_normal_vector(self.num_bins,
self.num_bins / 2,
self.num_bins / 8)
#self.qdistr = scipy.stats.distributions.norm.pdf(y, loc=num_bins / 2, scale = num_bins / 8)
# Normalization.
#self.qdistr = self.qdistr / np.sum(self.qdistr, 1) [:, np.newaxis]
self.qdistr_init = self.qdistr.copy()
# Plotting, for testing.
#plt.plot(self.qdistr[0, :])
#plt.draw()
#time.sleep(2)
#plt.close('all')
self.rank2id, self.id2rank = self.compute_ranks(self.qdistr)
# generate true items quality and rank
self.generate_true_items_quality()
self.rank2id_true, self.id2rank_true = \
self.compute_ranks(self.qdistr_true)
# Computing true quality vector; quality_true[i] is true quality
# of item i.
#self.quality_true = self.avg(self.qdistr_true)
self.quality_true = self.num_items - self.id2rank_true
@classmethod
def from_qdistr_param(cls, items, qdistr_param, alpha=0.6,
num_bins=2001, cost_obj=None):
""" Alternative constructor for creating rank object
from quality distributions parameters.
Arguments are the same like in __init__ method but qdistr_param
is a list with mean and stdev for each item such that qdistr_param[2*i]
        and qdistr_param[2*i + 1] are mean and stdev for items[i].
"""
result = cls(items, alpha, num_bins, cost_obj,
k=None, init_dist_type='gauss')
result.restore_qdistr_from_parameters(qdistr_param)
return result
def get_normal_vector(self, num_bins, average, stdev):
x_array = np.arange(num_bins)
dist = x_array - average
# In literature sigma is standard deviation and sigma**2 is variance.
d = np.exp(-dist * dist / (2.0 * stdev * stdev))
d = d / np.sum(d)
return d
#def plot_distributions(self, hold=False, **kwargs):
# plt.clf()
# for i in range(self.num_items):
# plt.plot(self.qdistr[i, :])
# #plt.title(self.get_title_for_plot(**kwargs))
# if hold:
# plt.show()
# else:
# plt.ioff()
# plt.draw()
# time.sleep(.3)
#def get_title_for_plot(self, **kwargs):
# result = ''
# for key in kwargs:
# result += '%s %s, ' % (key, kwargs[key])
# result += 'raking error %s %%, ' % self.get_ranking_error()
# result += 'quality metric %s ' % self.get_quality_metric()
# return result
def generate_true_items_quality(self):
identity = np.eye(self.num_items)
zeros = np.zeros((self.num_items, self.num_bins - self.num_items))
self.qdistr_true = np.hstack((identity, zeros))
def compute_ranks(self, quality_distr):
""" Returns two vectors: id2rank and rank2id.
id2rank[i] is a rank of an item with id i.
rank2id[i] is an id of an item with rank i.
"""
avg = self.avg(quality_distr)
rank2id = avg.argsort()[::-1]
id2rank = rank2id.argsort()
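        # Worked example of the double-argsort trick (illustrative): if
        # avg = [0.2, 0.9, 0.5] then rank2id = [1, 2, 0] (ids ordered from best
        # to worst) and id2rank = [2, 0, 1] (rank held by each id), so
        # id2rank[rank2id[k]] == k for every rank k.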
return rank2id, id2rank
def compute_percentile(self):
# Rank is from 0, 1, ..., num_items - 1
val = 100 / float(self.num_items)
id2percentile = {}
for idx in xrange(self.num_items):
id2percentile[idx] = val * (self.num_items - self.id2rank[idx])
return id2percentile
def avg(self, quality_distr):
""" returns vector v with average qualities for each item.
v[i] is the average quality of the item with id i.
"""
        # grid_b is a matrix consisting of vertically stacked copies of the
        # vector (0, 1, ..., num_bins - 1).
grid_b, _ = np.meshgrid(np.arange(self.num_bins),
np.arange(self.num_items))
# Actually values are from 1 to num_bins.
grid_b = grid_b + 1
# avg[i] is expected value of quality distribution for item with id i.
avg = np.sum(quality_distr * grid_b, 1)
return avg
def update(self, sorted_items, new_item=None, alpha_annealing=None,
annealing_type='before_normalization_uniform'):
""" Main update function.
        Given sorted_items and new_item it updates the quality distributions and
        item ranks.
        Method returns dictionary d such that d['submission id'] is a list
        [percentile, average, stdev], i.e. the percentile of the submission and
        the average and stdev of its quality distribution.
If alpha_annealing is None then we use old self.alpha otherwise we
set self.alpha to alpha_annealing.
Arguments:
- sorted_items is a list of items sorted by user such that
rank(sorted_items[i]) > rank(sorted_items[j]) for i < j
(Worst to Best)
- new_item is an id of a submission from sorted_items which was new
to the user. If sorted_items contains only two elements then
new_item is None.
        - annealing_type (see the n_comparisons_update method) is one of
'before_normalization_uniform' or
'before_normalization_gauss' or
'after_normalization'
"""
# Setting new annealing coefficient.
alpha_old = None
if not alpha_annealing is None:
alpha_old = self.alpha
self.alpha = alpha_annealing
# Obtaining ordering in terms of internal ids.
sorted_ids = [self.orig_items_id.index(x) for x in sorted_items]
self.n_comparisons_update(sorted_ids, annealing_type)
id2percentile = self.compute_percentile()
qdistr_param = self.get_qdistr_parameters()
result = {}
for idx in xrange(self.num_items):
avrg = qdistr_param[2 * idx]
stdev = qdistr_param[2 * idx + 1]
result[self.orig_items_id[idx]] = (id2percentile[idx], avrg, stdev)
# Setting old alpha back.
if not alpha_old is None:
self.alpha = alpha_old
return result
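    # Illustrative note: the mapping returned by update() has the shape
    #   {submission_id: (percentile, mean, stdev), ...}
    # e.g. {'sub_42': (75.0, 1532.4, 210.7)} -- the values here are made up.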
def get_ranking_error_inthe_end_of_round(self, num_items_to_compare):
"""
        TODO(michael): As of now this method is not in use.
        Write a method to return an error which is
        interpretable in terms of ranking.
Method returns ranking error in the end of contest round.
Each contest consists of rounds. On the first round users compare 2
submissions, on the second round users compare 3 submsissions, etc.
Because we don't know true quality of submissions then we need to make
assumptions about how noisy will be users and return errors based on
experiments.
"""
# Current assumption is that 5% of users will sort submission randomly
# (so 3 students out of 60 will order randomly).
        # Another assumption is that each user is a gaussian user with stdev
        # equal to 2 and any two neighboring submissions have distance 1 in quality.
# In experiments with above settings I measured average(avrg_rank_error)
# and stdev (stdev_rank_error) of ranking error
# (ranking error of item i is |rank(i) - true_rank(i)|).
#
# Method returns ranking error as avrg_rank_error + 2 * stdev_rank_error
if num_items_to_compare == 2: # First round.
avrg_rank_error = 10.9
stdev_rank_error = 8.8
elif num_items_to_compare == 3: # Second round.
avrg_rank_error = 6.6
stdev_rank_error = 6
elif num_items_to_compare == 4: # Third round.
avrg_rank_error = 3.6
stdev_rank_error = 3.6
elif num_items_to_compare == 5: # Fourth round.
avrg_rank_error = 2.1
stdev_rank_error = 2.1
elif num_items_to_compare == 6: # Fifth round.
avrg_rank_error = 1.4
stdev_rank_error = 1.3
elif num_items_to_compare == 7: # Round number six.
avrg_rank_error = 1.2
stdev_rank_error = 1.1
elif num_items_to_compare == 8: # Round number seven.
avrg_rank_error = 1
stdev_rank_error = 1
else:
return None
return avrg_rank_error + 2 * stdev_rank_error
def n_comparisons_update(self, descend_list,
annealing_type='before_normalization_uniform'):
""" Updates quality distributions given n ordered items.
Item id is from set {0, 1, ..., num_items - 1}
Bins are 0, 1, ..., num_bins - 1
descend_list is a list of id's such that
rank(descend_list[i]) > rank(descend_list[j]) if i < j
(Worst to Best)
annealing type
- 'after_normalization' is self explanatory
        - 'before_normalization_uniform' is used by default; it works
          best in the presence of users who give a random ordering.
        - 'before_normalization_gauss' works best in the presence of gaussian
          users (users who can swap similar items).
"""
n = len(descend_list)
factorial = math.factorial(n)
# Let's denote quality of element descend_list[i] as zi, then
# z0 < z1 < ... < z(n-1) where n is length of descend_list.
# v[0, x] = Pr(x < z(n-1))
# v[1, x] = Pr(x < z(n-2) < z(n-1))
# v[i, x] = Pr(x < z(n-1-i) < ... < z(n-1))
# v[n-2, x] = Pr(x < z1 < ... < z(n-1))
v = np.zeros((n - 1, self.num_bins))
q = self.qdistr[descend_list[n-1], :]
v[0,:] = 1 - np.cumsum(q)
# w[0, x] = Pr(z0 < x)
# w[1, x] = Pr(z0 < z1 < x)
# w[i, x] = Pr(z0 < z1 < ... < z(i) < x)
# w[n-2, x] = Pr(z0 < z1 < ... < z(n-2) < x)
w = np.zeros((n - 1, self.num_bins))
q = self.qdistr[descend_list[0], :]
w[0,:] = np.cumsum(q) - q
# Filling v and w.
for idx in xrange(1, n - 1, 1):
# Matrix v.
# Calculating v[idx,:] given v[idx-1,:].
t = self.qdistr[descend_list[n - 1 - idx], :] * v[idx - 1, :]
t = t[::-1]
t = np.cumsum(t)
# Shift.
t = self.shift_vector(t)
v[idx,:] = t[::-1]
# Matrix w.
# Calculating w[idx,:] given w[idx-1,:].
t = self.qdistr[descend_list[idx], :] * w[idx - 1, :]
t = np.cumsum(t)
t = self.shift_vector(t)
w[idx,:] = t
# Updating distributions.
# Update first distributions.
idx = descend_list[0]
q = self.qdistr[idx,:]
q_prime = q * v[-1, :]
# Annealing.
if annealing_type == 'before_normalization_uniform':
self.qdistr[idx,:] = (1.0/factorial) * (1 - self.alpha) * q + \
self.alpha * q_prime
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
elif annealing_type == 'after_normalization':
q_prime = q_prime / np.sum(q_prime)
self.qdistr[idx,:] = (1 - self.alpha) * q + self.alpha * q_prime
elif annealing_type == 'before_normalization_gauss':
ww = v[-1, :]
self.qdistr[idx,:] = (1 - self.alpha) * q *(1 - ww) + \
self.alpha * q * ww
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
else:
# Should not happen.
raise Exception("Error: annealing type is not known.")
# Update last distributions.
idx = descend_list[-1]
q = self.qdistr[idx,:]
q_prime = q * w[-1, :]
# Annealing.
if annealing_type == 'before_normalization_uniform':
self.qdistr[idx,:] = (1.0/factorial) * (1 - self.alpha) * q + \
self.alpha * q_prime
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
elif annealing_type == 'after_normalization':
q_prime = q_prime / np.sum(q_prime)
self.qdistr[idx,:] = (1 - self.alpha) * q + self.alpha * q_prime
elif annealing_type == 'before_normalization_gauss':
ww = w[-1, :]
self.qdistr[idx,:] = (1 - self.alpha) * q *(1 - ww) + \
self.alpha * q * ww
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
else:
# Should not happen.
raise Exception("Error: annealing type is not known.")
# Update the rest of distributions.
for i in range(1, n - 1, 1):
idx = descend_list[i]
q = self.qdistr[idx,:]
q_prime = q * w[i - 1, :] * v[-(i+1), :]
# Annealing.
if annealing_type == 'before_normalization_uniform':
self.qdistr[idx,:] = (1.0/factorial) * (1 - self.alpha) * q + \
self.alpha * q_prime
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
elif annealing_type == 'after_normalization':
q_prime = q_prime / np.sum(q_prime)
self.qdistr[idx,:] = (1 - self.alpha) * q + self.alpha * q_prime
elif annealing_type == 'before_normalization_gauss':
ww = w[i - 1, :] * v[-(i+1), :]
self.qdistr[idx,:] = (1 - self.alpha) * q *(1 - ww) + \
self.alpha * q * ww
self.qdistr[idx,:] = self.qdistr[idx,:] / np.sum(self.qdistr[idx,:])
else:
# Should not happen.
raise Exception("Error: annealing type is not known.")
# Update id2rank and rank2id vectors.
self.rank2id, self.id2rank = self.compute_ranks(self.qdistr)
def sample(self, black_items=None):
""" Returns two items to compare. If there is no two items to sample
from then None is returned.
Sampling by loss-driven comparison algorithm.
black_items cannot be sampled.
"""
indices = range(self.num_items)
if (not black_items == None) and (not len(black_items) == 0):
indices = [x for x in indices if not x in black_items]
if len(indices) < 2:
return None
        # l is a len(indices)^2 array; l[idx] is the expected loss for items with ids
# idx/len(indices) and idx%len(indices)
l = np.zeros(len(indices) ** 2)
for i in xrange(len(indices)):
for j in xrange(len(indices)):
# We are choosing pairs (i, j) such that p(i) < p(j)
ii = indices[i]
jj = indices[j]
if self.id2rank[ii] < self.id2rank[jj]:
l[i * len(indices) + j] = self.get_expected_loss(ii, jj)
else:
l[i * len(indices) + j] = 0
# normalization
l /= l.sum()
# randomly choosing a pair
cs = l.cumsum()
rn = np.random.uniform()
idx = cs.searchsorted(rn)
i, j = idx/len(indices), idx%len(indices)
# sanity check
ii = indices[i]
jj = indices[j]
if self.id2rank[ii] >= self.id2rank[jj]:
raise Exception('There is an error in sampling!')
return ii, jj
def sample_n_items(self, n):
items = set()
while True:
i,j = self.sample()
items.add(i)
items.add(j)
if len(items) == n:
return list(items)
if len(items) > n:
items.remove(i if random.random() < 0.5 else j)
return list(items)
def sample_item(self, old_items, black_items=None, sample_one=True ):
""" Method samples an item given items the user received before.
        If old_items is None or empty, then the method returns one item when
        sample_one is true, otherwise it returns two items.
black_items is a list with items which should not be sampled.
If it is impossible to sample an item then None is returned.
"""
if black_items == None:
black_items = []
if old_items == None or len(old_items) == 0:
if len(black_items) == 0:
l = self.sample()
else:
ids = [self.orig_items_id.index(x) for x in black_items]
l = self.sample(ids)
# If we need two elements.
if not sample_one:
if l == None:
return None
return [self.orig_items_id[x] for x in l]
# We need only one element.
if not l is None:
return self.orig_items_id[l[0]] if random.random() < 0.5 else\
self.orig_items_id[l[1]]
# We cannot sample two items, try to sample only one.
if len(black_items) == len(self.orig_items_id):
return None
if len(self.orig_items_id) == 0:
return None
item = [x for x in self.orig_items_id if not x in black_items]
return item[0]
taken_ids = [idx for idx in range(self.num_items) if \
self.orig_items_id[idx] in old_items]
free_ids = [idx for idx in range(self.num_items) if \
(not self.orig_items_id[idx] in old_items and
not self.orig_items_id[idx] in black_items)]
# If there are no items to pick from then return None.
if len(free_ids) == 0:
return None
        # l[idx] is the expected loss for items with ids
# idx/len(taken_ids) and idx%len(taken_ids)
l = np.zeros(len(taken_ids) * len(free_ids))
for i in xrange(len(taken_ids)):
for j in xrange(len(free_ids)):
ii = taken_ids[i]
jj = free_ids[j]
if self.id2rank[ii] < self.id2rank[jj]:
l[i * len(free_ids) + j] = self.get_expected_loss(ii, jj)
else:
l[i * len(free_ids) + j] = self.get_expected_loss(jj, ii)
# normalization
#print l
l /= l.sum()
# randomly choosing a pair
cs = l.cumsum()
rn = np.random.uniform()
idx = cs.searchsorted(rn)
i, j = idx/len(free_ids), idx%len(free_ids)
ii = taken_ids[i]
jj = free_ids[j]
# sanity check
#if self.id2rank[ii] >= self.id2rank[jj]:
# raise Exception('There is an error in sampling!')
return self.orig_items_id[jj]
def shift_vector(self, vec):
""" Shifts vector one position right filling the most left element
with zero.
"""
vec[1:] = vec[:-1]
vec[0] = 0
return vec
def get_expected_loss(self, i, j):
""" Calculate expected loss l(i, j) between items i and j.
It is implied that r(i) < r(j).
"""
if self.cost_obj == None:
return self.get_missrank_prob(i, j)
c_i = self.get_cost(i, self.k, self.id2rank)
c_j = self.get_cost(j, self.k, self.id2rank)
#return abs(c_i + c_j - c_i * c_j) * self.get_missrank_prob(i, j)
return abs(c_i - c_j) * self.get_missrank_prob(i, j)
def get_cost(self, i, k, id2rank):
return self.cost_obj.calculate(i, k, id2rank)
def get_missrank_prob(self, i, k):
""" Method returns probability that r(i) > r(k) where r(i) is a rank
of an item with id i.
"""
q_k = self.qdistr[k, :]
Q_i = np.cumsum(self.qdistr[i, :])
prob = np.dot(q_k, Q_i)
return prob
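    # Illustrative note: with discretized distributions this is
    #   Pr(r(i) > r(k)) = sum_x q_k(x) * CDF_i(x).
    # For example, q_i = [0.7, 0.2, 0.1] and q_k = [0.1, 0.2, 0.7] give
    # np.dot(q_k, np.cumsum(q_i)) = 0.1*0.7 + 0.2*0.9 + 0.7*1.0 = 0.95,
    # i.e. item i is very likely ranked below item k.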
def get_quality_metric(self):
""" Returns quality metric for current quality distribution
for top-k problem.
"""
q_true = np.sum(self.quality_true[self.rank2id_true[0:self.k]])
q_alg = np.sum(self.quality_true[self.rank2id[0:self.k]])
val = (q_true - q_alg) / float(self.k)
return val
def get_ranking_error(self):
""" Get ranking error, i.e. ratio of number of items which wrongly
have rank less than k to the constant k.
"""
counter = 0
for idx in xrange(self.num_items):
if self.id2rank_true[idx] >= self.k and self.id2rank[idx] < self.k:
counter += 1
return 100 * float(counter)/self.k
def get_qdistr_parameters(self):
""" Method returns array w such that w[2*i], w[2*i+1] are mean and
        standard deviation of the quality distribution of item i (self.qdistr[i])
"""
w = np.zeros(2 * self.num_items)
val = range(self.num_bins)
for i in xrange(self.num_items):
p = self.qdistr[i,:]
w[2 * i] = np.sum(p * val)
w[2 * i + 1] = np.sqrt(np.sum(p * (val - w[2 * i]) ** 2))
return w
def restore_qdistr_from_parameters(self, w):
""" Method restores quality distributions from array w returned by
get_qdistr_parameters: w such that w[2*i], w[2*i+1] are mean and
standard deviation of quality distribution of item i
"""
        self.qdistr = np.zeros((self.num_items, self.num_bins))
y = range(self.num_bins)
for i in xrange(self.num_items):
mean = w[2 * i]
std = w[2 * i + 1]
self.qdistr[i, :] = self.get_normal_vector(self.num_bins, mean, std)
#self.qdistr[i,:] = scipy.stats.distributions.norm.pdf(y, loc=mean,
# scale=std)
if np.sum(self.qdistr[i,:]) == 0:
print 'ERROR, sum should not be zero !!!'
# Normalization.
#self.qdistr = self.qdistr / np.sum(self.qdistr, 1) [:, np.newaxis]
def evaluate_ordering(self, ordering):
""" rank(oredring[i]) > rank(ordering[j]) for i < j
(Worst to Best)
Function, returns average probability of error.
"""
n = len(ordering)
if n <= 1:
return 0
# Below ordering is evaluated using "incremental" way.
# Incremental type of ordering evaluation is when for each
# entity e in ordering (starting from 2nd one) we compute
# error_e = 1 - 2*max(Pr(error)) and total evaluation is
# a sum of all error_e.
        # max(Pr(error)) is the maximum error that the user made when comparing
# entity e.
val = 0.0
for i in xrange(0, n, 1):
ii = self.orig_items_id.index(ordering[i])
l1 = [self.get_missrank_prob(self.orig_items_id.index(ordering[j]),
ii) for j in xrange(i + 1, n, 1)]
l2 = [self.get_missrank_prob(ii,
self.orig_items_id.index(ordering[j])) for j in xrange(0, i, 1)]
#pr_error = 0
#if len(l1) != 0:
# pr_error = max(l1)
#if len(l2) != 0:
# pr_error = max([max(l2), pr_error])
#val += 1 - 2 * pr_error
l1.extend(l2)
if len(l1) == 0:
continue
val += 1 - np.mean(l1)
return val
def evaluate_ordering_using_dirichlet(self, ordering):
""" rank(oredring[i]) > rank(ordering[j]) for i < j
(Worst to Best).
"""
if len(ordering) <= 1:
return 0
# alpha is a number of "Truth"
# beta is a number of "False"
alpha, beta = 0.01, 0.01
for i in xrange(len(ordering)):
for j in xrange(i + 1, len(ordering), 1):
item_i = self.orig_items_id.index(ordering[i])
item_j = self.orig_items_id.index(ordering[j])
# q is a probability that comparison is True
#q = 1 - self.get_missrank_prob(item_i, item_j)
q = 1 - self.get_missrank_prob(item_j, item_i)
# Update alpha and beta.
if q > 0.5:
alpha += 2 * (q - 0.5)
else:
beta += 2 * (0.5 - q)
        # Okay, alpha and beta are computed.
        # Let q be the probability that the user says True.
        # The quality of the ordering is the 90th percentile, so we need to compute it.
perc = 0.9
# First, numerically compute unnormalised probability mass function of q
delta = 0.001
x = np.arange(0 + delta, 1, delta)
#print 'alpha', alpha
#print 'beta', beta
y = x ** (alpha - 1) * (1 - x) ** (beta - 1)
# Integral approximation based on trapezoidal rule.
y1 = y[:-1]
y2 = y[1:]
integral_vec = (y2 + y1) / 2 * delta
integral = np.sum(integral_vec)
cumsum = np.cumsum(integral_vec)
threshold = (1 - perc) * integral
idx = cumsum.searchsorted(threshold)
val = idx * delta
return val
def sort_items_truthfully(self, items):
""" Method is for testing purposes.
It simulates sorting by a truthful user.
        Returns a sorted list of items such that rank(result[i]) > rank(result[j])
        for i < j (worst to best).
#TODO(michael): check this function in case of use
"""
items_ids = [idx for idx in range(self.num_items) if \
self.orig_items_id[idx] in items]
values = np.array(self.quality_true)[items_ids]
idx = np.argsort(values)
return [self.orig_items_id[x] for x in np.array(items_ids)[idx]]
def get_quality_of_order(self, qual_type='avrg_rank_error'):
""" Method calculates quality of current order of items.
        (essentially it measures error rather than quality)
quality types:
inversions - calculates normalized number of inversions.
avrg_rank_error - is average of |rank(i) - true_rank(i)| over all
items.
stdev_rank_error - standard deviation of |rank(i) - true_rank(i)|
"""
if qual_type == 'inversions':
seq = self.id2rank.argsort()
seq = [self.id2rank_true[x] for x in seq]
_, num_inv = self._sort_and_get_inv_num(seq)
return 2.0 * num_inv / len(seq) / (len(seq) - 1)
elif qual_type == 'avrg_rank_error':
seq = np.abs(self.id2rank_true - self.id2rank)
return np.mean(seq)
elif qual_type == 'stdev_rank_error':
seq = np.abs(self.id2rank_true - self.id2rank)
return np.std(seq)
else:
raise Exception("Quality type is unknown!")
def _sort_and_get_inv_num(self, seq):
""" Returns tuple (sorted_seq, num_inv) where sorted_seq is sorted
        sequence seq and num_inv is the number of inversions in seq.
        Increasing order has zero inversions.
        The sequence 1, 5, 3, 2 has 3 inversions: (5,3), (5,2) and (3,2).
        The maximum number of inversions in a sequence of length N is N * (N - 1) / 2.
seq is a sequence with unique elements.
"""
length = len(seq)
if length <= 1:
return seq, 0
left = seq[: (length / 2)]
right = seq[(length / 2) :]
left_sorted, num_inv_left = self._sort_and_get_inv_num(left)
right_sorted, num_inv_right = self._sort_and_get_inv_num(right)
        # Merging and counting inversions.
length_l, length_r = len(left), len(right)
idx_l, idx_r = 0, 0
seq_sorted, num_inv = [0] * length, 0
for idx in xrange(length):
if idx_l == length_l:
seq_sorted[idx:] = right_sorted[idx_r :]
break
if idx_r == length_r:
seq_sorted[idx:] = left_sorted[idx_l :]
break
if left_sorted[idx_l] <= right_sorted[idx_r]:
seq_sorted[idx] = left_sorted[idx_l]
idx_l += 1
else:
seq_sorted[idx] = right_sorted[idx_r]
idx_r += 1
num_inv += length_l - idx_l
num_inv += num_inv_left + num_inv_right
return seq_sorted, num_inv
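if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): reproduce, with
    # plain numpy, the misranking probability used by get_missrank_prob above
    # for two toy quality distributions over 3 bins.
    import numpy as np
    q_i = np.array([0.7, 0.2, 0.1])  # item i: mass on low-quality bins
    q_k = np.array([0.1, 0.2, 0.7])  # item k: mass on high-quality bins
    prob_i_below_k = np.dot(q_k, np.cumsum(q_i))
    assert abs(prob_i_below_k - 0.95) < 1e-9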
| bsd-3-clause |
losonczylab/Zaremba_NatNeurosci_2017 | losonczy_analysis_bundle/lab/classes/new_interval.py | 1 | 17615 | """Definition of Interval objects.
Used to filter/mask data in time.
"""
try:
from bottleneck import nanmin, nanmax
except ImportError:
from numpy import nanmin, nanmax
import numpy as np
import pandas as pd
class Interval(pd.DataFrame):
"""Class for defining intervals of interest across ROIs and Trials.
Inherits from and stores data as a pandas DataFrame.
Each row is an interval defined by the values in the 'start' and 'stop'
    columns. In addition, there should be either an 'experiment' or 'trial'
column and optionally an 'roi' column.
Parameters
----------
intervals : pd.DataFrame, list, dict
Interval data will be passed to pd.DataFrame, so see pd.DataFrame for
details of possible initialization structure. The result DataFrame must
at least have 'start' and 'stop' columns.
sampling_interval : float
Conversion factor for values stored in 'start'/'stop'; 1. if intervals
are in seconds, or the frame period if the intervals are in imaging
frames.
num_frames : int
Duration of all time for the given experiments/trials. Used for
converting to mask.
Note
----
For details of DataFrame subclassing, see:
http://pandas.pydata.org/pandas-docs/stable/internals.html#subclassing-pandas-data-structures
"""
_metadata = ['_sampling_interval']
def __init__(self, intervals, sampling_interval=1., **kwargs):
super(Interval, self).__init__(intervals, **kwargs)
assert 'start' in self.columns
assert 'stop' in self.columns
self._sampling_interval = sampling_interval
@property
def _constructor(self):
return Interval
@property
def _constructor_sliced(self):
return IntervalSeries
@property
def sampling_interval(self):
"""Sampling interval of data in seconds."""
return self._sampling_interval
@sampling_interval.setter
def sampling_interval(self, new_sampling_interval):
self.resample(float(new_sampling_interval), inplace=True)
@classmethod
def from_mask(cls, mask, sampling_interval=1., data=None, **kwargs):
"""Create an interval object from a boolean mask.
Parameters
----------
mask : Nx1 np.ndarray of booleans
True within interval, False outside of interval.
sampling_interval : float
Time between samples, used to convert intervals to time (in
seconds).
data : dict, optional
Additional columns to add to Interval DataFrame. Each key will be a
column and the value will be the same for each row.
kwargs : optional
Additional keyword arguments are passed to the DataFrame init.
Returns
-------
Interval
New Interval object representation of the mask.
"""
if data is None:
data = {}
df_list = []
for start, stop in _mask_to_intervals_1d(mask):
df_list.append(dict(start=start, stop=stop, **data))
return cls(df_list, sampling_interval=sampling_interval, **kwargs)
def resample(self, new_sampling_interval=1., inplace=False):
"""Change the time units of the data.
Parameters
----------
new_sampling_interval : float
The new sampling interval of the data.
inplace : boolean, optional
If True, edit the Interval object in place, if False return a new
Interval object.
Returns
-------
Interval
Either the original Interval with new 'start'/'stop' values, or
a new Interval
"""
scale_factor = self.sampling_interval / float(new_sampling_interval)
if inplace:
dataframe = self
else:
dataframe = self.copy()
dataframe['start'] *= scale_factor
dataframe['stop'] *= scale_factor
dataframe._sampling_interval = new_sampling_interval
if not inplace:
return dataframe
def merge_intervals(self, inplace=False):
"""Merge overlapping intervals.
As a side-effect of the merging, also sorts all intervals by 'start'.
Parameters
----------
inplace : bool
"""
sampling_interval = self.sampling_interval
def join_wrapper(group_df):
"""Wrapper to correctly wrap and unwrap data for reduce."""
sorted_df = group_df.sort_values(by='start', na_position='first')
df_rows = (row for _, row in sorted_df.iterrows())
reduced = reduce(_joiner, df_rows, [])
return type(self)(reduced).reset_index(drop=True)
if not inplace:
dataframe = self.copy()
else:
dataframe = self
other_columns = [col for col in dataframe.columns.values
if col not in ['start', 'stop']]
dataframe = dataframe.groupby(other_columns).apply(
join_wrapper).reset_index(drop=True)
dataframe._sampling_interval = sampling_interval
if not inplace:
return dataframe
def durations(self, end_time):
"""Calculate total durations of intervals.
Parameters
----------
end_time : float
Max time (in seconds) of the interval. Replaces NaN in 'stop'
column.
Returns
-------
pd.DataFrame
"""
other_columns = [col for col in self.columns.values
if col not in ['start', 'stop']]
resampled = self.resample(inplace=False).fillna(
{'start': 0, 'stop': end_time})
resampled['duration'] = resampled['stop'] - resampled['start']
result = resampled.groupby(other_columns, as_index=False).agg(
lambda x: x['duration'].sum())
return result.drop(['start', 'stop'], axis=1)
def __invert__(self):
"""Invert the time that is in/out of the interval."""
# Need a way to invert ROIs/Trials not present in original df.
raise NotImplementedError('Does not reliably invert intervals.')
other_columns = [col for col in self.columns.values
if col not in ['start', 'stop']]
def invert_wrapper(group_df):
def row_gen(group_df):
for _, row in group_df.iterrows():
# row['_sampling_interval'] = group_df.sampling_interval
yield row
reduced = reduce(_invert_intersector, row_gen(group_df), [])
return reduced
result = self.groupby(other_columns).apply(invert_wrapper)
result._sampling_interval = self.sampling_interval
return result.reset_index(drop=True)
def __and__(self, other):
"""Combine Interval objects to only include overlapping intervals."""
# If we are combining with a non-Interval, use it as a filter
# Might also just be able to add all NaN 'start' and 'stop' columns.
if all(col not in other.columns for col in ['start', 'stop']):
return pd.merge(self, other, how='inner').merge_intervals()
other_resampled = other.resample(self.sampling_interval, inplace=False)
other_columns = set(self.columns).intersection(
other.columns).difference(['start', 'stop'])
merged = pd.merge(
pd.DataFrame(self), pd.DataFrame(other_resampled),
on=list(other_columns), how='inner')
merged_rows = (row for _, row in merged.iterrows())
reduced = Interval(
reduce(_intersector, merged_rows, []),
            columns=set(self.columns).union(other.columns),
sampling_interval=self.sampling_interval)
return reduced.merge_intervals()
def __or__(self, other):
"""Combine Interval objects as the union of all intervals."""
if all(col not in other.columns for col in ['start', 'stop']):
# if all(col in self.columns for col in other.columns):
# raise Exception
# return pd.merge(self, other, how='outer')
raise ValueError(
'Unrecognized other Interval, expecting Interval object.')
other_resampled = other.resample(self.sampling_interval, inplace=False)
other_columns = set(self.columns).intersection(
other.columns).difference(['start', 'stop'])
merged = pd.merge(
pd.DataFrame(self), pd.DataFrame(other_resampled),
on=list(other_columns), how='outer', indicator=True)
merged_rows = (row for _, row in merged.iterrows())
reduced = Interval(
reduce(_unioner, merged_rows, []),
columns=set(self.columns).union(other.columns),
sampling_interval=self.sampling_interval)
return reduced.merge_intervals()
def filter_events(self, events_df, key='value', invert=False):
"""Filter a DataFrame of event times.
Parameters
----------
events_df : pd.DataFrame
A DataFrame containing times to filter. Should have columns that
match the Interval dataframe: expt, trial, roi, etc.
key : string
The column containing the data to filter on. Should be at same
sampling interval as current Interval.
Returns
-------
pd.DataFrame
A new DataFrame only including events that occurred within the
current Interval.
"""
# Create a copy and also convert to a basic DataFrame (might be an
# Interval)
events_df = pd.DataFrame(events_df)
events_df['_index'] = np.arange(events_df.shape[0])
# 'start' and 'stop' are special columns in an Interval dataframe.
# Ensure that we never try to merge on them, and if you want to filter
# on one of them, rename to something new.
columns = set(list(events_df.columns)).intersection(
self.columns).difference(['start', 'stop'])
if key in ['start', 'stop']:
orig_key = key
key = '_value'
events_df.rename(columns={orig_key: key}, inplace=True)
events = events_df[list(columns) + [key, '_index']]
merged = pd.merge(events, self, how='inner')
merged['_test'] = (np.isnan(merged['start']) | (merged['start'] <= merged[key])) & \
(np.isnan(merged['stop']) | (merged[key] < merged['stop']))
merged_test = merged[['_index', '_test']]
grouped = merged_test.groupby('_index').any().reset_index()
result = pd.merge(events_df, grouped, on='_index', how='left')
result['_test'] = result['_test'].fillna(False)
if invert:
result = result[~result['_test']]
else:
result = result[result['_test']]
del result['_test']
del result['_index']
        if key == '_value':
result.rename(columns={key: orig_key}, inplace=True)
return result
class IntervalSeries(pd.Series):
def __init__(self, *args, **kwargs):
super(IntervalSeries, self).__init__(*args, **kwargs)
@property
def _constructor(self):
return IntervalSeries
@property
def _constructor_expanddim(self):
return Interval
def concat(intervals, ignore_index=True, **kwargs):
"""Same functionality as pd.concat.
Sampling interval of resulting Interval will match the first Interval.
Parameters
----------
ignore_index : bool
Changed default to True, but see pd.concat for details.
kwargs : optional
All other arguments are passed to pd.concat.
"""
new_sampling_interval = intervals[0].sampling_interval
resampled_intervals = [
interval.resample(new_sampling_interval, inplace=False)
for interval in intervals]
concat_intervals = pd.concat(
resampled_intervals, ignore_index=ignore_index, **kwargs)
concat_intervals._sampling_interval = new_sampling_interval
return concat_intervals
def _mask_to_intervals_1d(mask):
"""Convert a 1d boolean array to Nx2 array of starts/stops.
Parameters
----------
mask : Nx1 np.ndarray
Returns
-------
intervals : Mx2 np.ndarray
"""
mask_diff = np.diff(mask.astype('int'))
starts = np.where(mask_diff == 1)[0]
stops = np.where(mask_diff == -1)[0]
if len(stops) and (not len(starts) or stops[0] < starts[0]):
starts = np.hstack([np.nan, starts])
if len(starts) and (not len(stops) or starts[-1] > stops[-1]):
stops = np.hstack([stops, np.nan])
assert len(starts) == len(stops)
stacked_intervals = np.vstack([starts, stops]).T
stacked_intervals += 1 # result of diff is trimmed down by 1
return stacked_intervals
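# Illustrative example: a mask of [False, True, True, False, True] yields
# np.array([[1., 3.], [4., nan]]), i.e. half-open [start, stop) runs of True
# values, with NaN marking a run that is still open at the start or end of
# the mask.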
def _joiner(acc, int_df_row):
# https://stackoverflow.com/questions/37496759/combining-discrete-and-or-overlapping-time-sequences-from-lists
# if an empty list, return the new interval
if not len(acc):
return [int_df_row]
# pop the last interval from the list
last = acc.pop()
# if the intervals are disjoint, return both
if int_df_row['start'] > last['stop']:
return acc + [last, int_df_row]
# otherwise, join them together
last['stop'] = np.max([int_df_row['stop'], last['stop']])
return acc + [last]
def _unioner(acc, row):
# Expect '_merge', start_x', 'stop_x', 'start_y', and 'stop_y'
other_columns = set(row.index).difference(
['start_x', 'stop_x', 'start_y', 'stop_y', '_merge'])
indicator = row['_merge']
base_dict = {col: row[col] for col in other_columns}
intervals = [{'start': row['start_x'], 'stop': row['stop_x']},
{'start': row['start_y'], 'stop': row['stop_y']}]
intervals[0].update(base_dict)
intervals[1].update(base_dict)
    if indicator == 'left_only':
return acc + [intervals[0]]
    elif indicator == 'right_only':
return acc + [intervals[1]]
if np.isnan(intervals[1]['start']) or \
intervals[1]['start'] < intervals[0]['start']:
intervals = intervals[::-1]
# if the intervals are disjoint, return both
if intervals[0]['stop'] < intervals[1]['start']:
return acc + intervals
# otherwise, join them together
joined_int = intervals[0]
joined_int['stop'] = np.max([intervals[0]['stop'], intervals[1]['stop']])
return acc + [joined_int]
def _intersector(acc, row):
# Expect 'start_x', 'stop_x', 'start_y', and 'stop_y'
other_columns = set(row.index).difference(
['start_x', 'stop_x', 'start_y', 'stop_y'])
base_dict = {col: row[col] for col in other_columns}
merged_int = dict(start=nanmax([row['start_x'], row['start_y']]),
stop=nanmin([row['stop_x'], row['stop_y']]), **base_dict)
if merged_int['start'] < merged_int['stop'] or \
np.any(np.isnan([merged_int['start'], merged_int['stop']])):
return acc + [merged_int]
return acc
def _invert_intersector(acc, row):
row_dict = {key: [val] * 2 for key, val in row.iteritems()
# if key not in ['start', 'stop', '_sampling_interval']}
if key not in ['start', 'stop']}
row_dict['start'] = [np.nan, row['stop']]
row_dict['stop'] = [row['start'], np.nan]
# row_int = Interval(row_dict, sampling_interval=row['_sampling_interval'])
row_int = Interval(row_dict)
row_int.dropna(how='all', subset=['start', 'stop'], inplace=True)
if not len(acc):
return row_int
else:
return acc & row_int
if __name__ == '__main__':
run_int = Interval([{'expt': 1, 'start': 2, 'stop': 5},
{'expt': 1, 'start': 7, 'stop': 9},
{'expt': 2, 'start': np.nan, 'stop': 10},
{'expt': 2, 'start': 15, 'stop': np.nan}])
pf_int = Interval([{'expt': 1, 'start': 10, 'stop': 12, 'roi': 1},
{'expt': 1, 'start': 3, 'stop': 4, 'roi': 1},
{'expt': 1, 'start': 8, 'stop': 10, 'roi': 1},
{'expt': 1, 'start': 8, 'stop': 11, 'roi': 2},
{'expt': 2, 'start': np.nan, 'stop': 2, 'roi': 1},
{'expt': 2, 'start': 2, 'stop': 12, 'roi': 1},
{'expt': 2, 'start': 11, 'stop': 13, 'roi': 2},
{'expt': 3, 'start': 2, 'stop': 4, 'roi': 1}])
roi_df = pd.DataFrame([{'expt': 1, 'roi': 1},
{'expt': 1, 'roi': 2},
{'expt': 1, 'roi': 3},
{'expt': 2, 'roi': 1},
{'expt': 3, 'roi': 1}])
trans_events = pd.DataFrame([{'expt': 1, 'roi': 1, 'max': 2, 'amp': 12},
{'expt': 1, 'roi': 1, 'max': 11, 'amp': 1},
{'expt': 1, 'roi': 1, 'max': 20, 'amp': 1},
{'expt': 1, 'roi': 5, 'max': 3, 'amp': 7},
{'expt': 2, 'roi': 1, 'max': 1, 'amp': 3}])
stim_events = pd.DataFrame([{'expt': 1, 'value': 3},
{'expt': 1, 'value': 20}])
z = pd.merge(run_int, roi_df, on=['expt'], how='outer').merge_intervals()
x = z | pf_int
union = run_int | pf_int
intersection = run_int & pf_int
# inverted_run = ~ run_int
pf_int.filter_events(trans_events, key='max')
    run_int.durations(100)
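    # Illustrative sketch with hypothetical names: build an Interval from a
    # boolean mask and use it to filter event times.
    mask_int = Interval.from_mask(np.array([False, True, True, False, True]),
                                  sampling_interval=1.,
                                  data={'expt': 1, 'roi': 1})
    mask_events = pd.DataFrame([{'expt': 1, 'roi': 1, 'value': 1.5},
                                {'expt': 1, 'roi': 1, 'value': 3.5}])
    # Keeps only events that fall inside an interval (here the event at 1.5).
    filtered_events = mask_int.filter_events(mask_events, key='value')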
from pudb import set_trace; set_trace()
| mit |
trungnt13/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
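# For example, tuplify(np.array([[1, 2], [3, 4]])) returns ((1, 2), (3, 4)).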
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
vantares/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
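# Illustrative usage sketch (assumes a running QApplication and a pandas
# DataFrame `df`; not part of the original demo below):
#
#     win = PlotWindow()
#     win.plot(df)    # draws the DataFrame on the embedded matplotlib axes
#     win.show()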
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
Quantipy/quantipy | tests/test_dataset.py | 1 | 39018 | import unittest
import os.path
import numpy as np
import pandas as pd
import quantipy as qp
from quantipy.core.tools.view.logic import (
has_any, has_all, has_count,
not_any, not_all, not_count,
is_lt, is_ne, is_gt,
is_le, is_eq, is_ge,
union, intersection)
from quantipy.core.tools.dp.prep import frange
freq = qp.core.tools.dp.prep.frequency
cross = qp.core.tools.dp.prep.crosstab
class TestDataSet(unittest.TestCase):
def check_freq(self, dataset, var, show='values'):
return freq(dataset._meta, dataset._data, var, show=show)
def check_cross(self, dataset, x, y, show='values', rules=False):
return cross(dataset._meta, dataset._data, x=x, y=y,
show=show, rules=rules)
def _get_dataset(self, cases=None):
path = os.path.dirname(os.path.abspath(__file__)) + '/'
name = 'Example Data (A)'
casedata = '{}.csv'.format(name)
metadata = '{}.json'.format(name)
dataset = qp.DataSet(name, False)
dataset.set_verbose_infomsg(False)
dataset.read_quantipy(path+metadata, path+casedata)
if cases:
dataset._data = dataset._data.head(cases)
return dataset
def test_read_quantipy(self):
dataset = self._get_dataset()
self.assertTrue(isinstance(dataset._data, pd.DataFrame))
self.assertTrue(isinstance(dataset._meta, dict))
def test_fileinfo(self):
dataset = self._get_dataset()
meta_def_key = dataset._meta['lib']['default text']
self.assertTrue(dataset.path is not None)
self.assertTrue(dataset.name == 'Example Data (A)')
self.assertTrue(dataset.filtered == 'no_filter')
self.assertTrue(dataset.text_key == meta_def_key)
self.assertTrue(dataset.text_key == 'en-GB')
self.assertTrue(dataset._verbose_errors is True)
self.assertTrue(dataset._verbose_infos is False)
self.assertTrue(dataset._dimensions_comp is False)
def test_filter(self):
dataset = self._get_dataset()
f = intersection([{'gender': [2]},
{'age': frange('35-45')}])
alias = 'men: 35 to 45 years old'
dataset.filter(alias, f, inplace=True)
# alias copied correctly?
self.assertEqual(dataset.filtered, alias)
# correctly sliced?
expected_index_len = 1509
self.assertEqual(len(dataset._data.index), expected_index_len)
self.assertEqual(dataset['age'].value_counts().sum(), expected_index_len)
expected_gender_codes = [2]
expected_age_codes = [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]
self.assertTrue(dataset['gender'].value_counts().index.tolist() ==
expected_gender_codes)
self.assertTrue(sorted(dataset['age'].value_counts().index.tolist()) ==
expected_age_codes)
def test_subset_from_varlist(self):
dataset = self._get_dataset()
keep = ['gender', 'q1', 'q5', 'q6']
sub_ds = dataset.subset(variables=keep)
# only variables from "keep" are left?
sub_ds_vars = sub_ds.columns() + sub_ds.masks()
expected_vars = sub_ds.unroll(keep, both='all')
self.assertTrue(sorted(expected_vars) == sorted(sub_ds_vars))
# data file set only list the "keep" variables?
set_vars = sub_ds._variables_from_set('data file')
self.assertTrue(sorted(keep) == sorted(set_vars))
# 'sets' & 'lib' list only reduced array masks ref.?
lib_ref = sub_ds._meta['lib']['values']
expected_lib_ref = ['q5', 'q6']
self.assertTrue(expected_lib_ref == sorted(lib_ref))
set_keys = sub_ds._meta['sets'].keys()
expected_set_keys = ['data file', 'q5', 'q6']
self.assertTrue(expected_set_keys == sorted(set_keys))
# DataFrame columns match "keep" list?
df_cols = sub_ds._data.columns[1:]
expected_df_cols = sub_ds.unroll(keep)
self.assertTrue(sorted(expected_df_cols) == sorted(df_cols))
def test_order_full_change(self):
dataset = self._get_dataset()
variables = dataset._variables_from_set('data file')
new_order = list(sorted(variables, key=lambda v: v.lower()))
dataset.order(new_order)
new_set_order = dataset._variables_to_set_format(new_order)
data_file_items = dataset._meta['sets']['data file']['items']
df_columns = dataset._data.columns.tolist()
self.assertEqual(new_set_order, data_file_items)
self.assertEqual(dataset.unroll(new_order), df_columns)
def test_order_repos_change(self):
dataset = self._get_dataset()
repos = [{'age': ['q8', 'q5']},
{'q6': 'q7'},
{'q5': 'weight_a'}]
dataset.order(reposition=repos)
data_file_items = dataset._meta['sets']['data file']['items']
df_columns = dataset._data.columns.tolist()
expected_items = ['record_number', 'unique_id', 'q8', 'weight_a', 'q5',
'age', 'birth_day', 'birth_month', 'birth_year',
'gender', 'locality', 'ethnicity', 'religion', 'q1',
'q2', 'q2b', 'q3', 'q4', 'q7', 'q6', 'q8a', 'q9',
'q9a', 'Wave', 'weight_b', 'start_time', 'end_time',
'duration', 'q14_1', 'q14_2', 'q14_3', 'RecordNo']
expected_columns = dataset.unroll(expected_items)
self.assertEqual(dataset._variables_to_set_format(expected_items),
data_file_items)
self.assertEqual(expected_columns, df_columns)
def test_categorical_metadata_additions(self):
dataset = self._get_dataset()
name, qtype, label = 'test', 'single', 'TEST VAR'
cats1 = [(4, 'Cat1'), (5, 'Cat2')]
cats2 = ['Cat1', 'Cat2']
cats3 = [1, 2]
for check, cat in enumerate([cats1, cats2, cats3], start=1):
dataset.add_meta(name, qtype, label, cat)
values = dataset.values(name)
if check == 1:
self.assertTrue(values, cats1)
elif check == 2:
expected_vals = [(1, 'Cat1'), (2, 'Cat2')]
self.assertTrue(values, expected_vals)
elif check == 3:
expected_vals = [(1, ''), (2, '')]
self.assertTrue(values, expected_vals)
def test_array_metadata(self):
dataset = self._get_dataset()
meta, data = dataset.split()
name, qtype, label = 'array_test', 'delimited set', 'TEST LABEL TEXT'
cats = ['Cat 1', 'Cat 2', 'Cat 3', 'Cat 4', 'Cat 5']
items1 = [(1, 'ITEM A'), (3, 'ITEM B'), (6, 'ITEM C')]
items2 = ['ITEM A', 'ITEM B', 'ITEM C']
items3 = [4, 5, 6]
for check, items in enumerate([items1, items2, items3], start=1):
dataset.add_meta(name, qtype, label, cats, items)
sources = dataset.sources(name)
            # categories correct?
expected_vals = list(enumerate(cats, start=1))
self.assertEqual(dataset.values(name), expected_vals)
# items correct?
items = dataset.items(name)
if check == 1:
expected_items = [('array_test_1', 'ITEM A'),
('array_test_3', 'ITEM B'),
('array_test_6', 'ITEM C')]
self.assertEqual(items, expected_items)
elif check == 2:
expected_items = [('array_test_1', 'ITEM A'),
('array_test_2', 'ITEM B'),
('array_test_3', 'ITEM C')]
self.assertEqual(items, expected_items)
elif check == 3:
expected_items = [('array_test_4', ''),
('array_test_5', ''),
('array_test_6', '')]
self.assertEqual(items, expected_items)
# value object location correct?
item_val_ref = dataset._get_value_loc(sources[0])
mask_val_ref = dataset._get_value_loc(name)
self.assertEqual(item_val_ref, mask_val_ref)
lib_ref = 'lib@values@array_test'
self.assertTrue(meta['columns'][sources[0]]['values'] == lib_ref)
self.assertTrue(meta['masks'][name]['values'] == lib_ref)
# sets entry correct?
self.assertTrue('masks@array_test' in meta['sets']['data file']['items'])
# parent entry correct?
for source in dataset.sources(name):
parent_meta = meta['columns'][source]['parent']
expected_parent_meta = {'masks@array_test': {'type': 'array'}}
parent_maskref = dataset.parents(source)
expected_parent_maskref = ['masks@array_test']
self.assertEqual(parent_meta, expected_parent_meta)
self.assertEqual(parent_maskref, expected_parent_maskref)
def test_rename_via_masks(self):
dataset = self._get_dataset()
meta, data = dataset.split()
new_name = 'q5_new'
dataset.rename('q5', new_name)
        # name properly changed?
self.assertTrue('q5' not in dataset.masks())
self.assertTrue(new_name in dataset.masks())
# item names updated?
items = meta['sets'][new_name]['items']
expected_items = ['columns@q5_new_1',
'columns@q5_new_2',
'columns@q5_new_3',
'columns@q5_new_4',
'columns@q5_new_5',
'columns@q5_new_6']
self.assertEqual(items, expected_items)
sources = dataset.sources(new_name)
expected_sources = [i.split('@')[-1] for i in expected_items]
self.assertEqual(sources, expected_sources)
# lib reference properly updated?
lib_ref_mask = meta['masks'][new_name]['values']
lib_ref_items = meta['columns'][dataset.sources(new_name)[0]]['values']
expected_lib_ref = 'lib@values@q5_new'
self.assertEqual(lib_ref_mask, lib_ref_items)
self.assertEqual(lib_ref_items, expected_lib_ref)
# new parent entry correct?
parent_spec = meta['columns'][dataset.sources(new_name)[0]]['parent']
expected_parent_spec = {'masks@{}'.format(new_name): {'type': 'array'}}
self.assertEqual(parent_spec, expected_parent_spec)
# sets entries replaced?
self.assertTrue('masks@q5' not in meta['sets']['data file']['items'])
self.assertTrue('masks@q5_new' in meta['sets']['data file']['items'])
self.assertTrue('q5' not in meta['sets'])
self.assertTrue('q5_new' in meta['sets'])
def test_copy_via_masks_full(self):
dataset = self._get_dataset()
meta, data = dataset.split()
suffix = 'test'
new_name = 'q5_test'
dataset.copy('q5', suffix)
        # name properly changed?
self.assertTrue('q5' in dataset.masks())
self.assertTrue(new_name in dataset.masks())
# item names updated?
items = meta['sets'][new_name]['items']
expected_items = ['columns@q5_test_1',
'columns@q5_test_2',
'columns@q5_test_3',
'columns@q5_test_4',
'columns@q5_test_5',
'columns@q5_test_6']
self.assertEqual(items, expected_items)
sources = dataset.sources(new_name)
old_items_split = [s.split('_') for s in dataset.sources('q5')]
expected_sources = ['{}_{}_{}'.format('_'.join(ois[:-1]), suffix, ois[-1])
for ois in old_items_split]
self.assertEqual(sources, expected_sources)
# lib reference properly updated?
lib_ref_mask = meta['masks'][new_name]['values']
lib_ref_items = meta['columns'][dataset.sources(new_name)[0]]['values']
expected_lib_ref = 'lib@values@q5_test'
self.assertEqual(lib_ref_mask, lib_ref_items)
self.assertEqual(lib_ref_items, expected_lib_ref)
# new parent entry correct?
parent_spec = meta['columns'][dataset.sources(new_name)[0]]['parent']
expected_parent_spec = {'masks@{}'.format(new_name): {'type': 'array'}}
self.assertEqual(parent_spec, expected_parent_spec)
# sets entries replaced?
self.assertTrue('masks@q5' in meta['sets']['data file']['items'])
self.assertTrue('masks@q5_test' in meta['sets']['data file']['items'])
self.assertTrue('q5' in meta['sets'])
self.assertTrue('q5_test' in meta['sets'])
def test_copy_via_masks_sliced_and_reduced(self):
dataset = self._get_dataset()
meta, data = dataset.split()
suffix = 'test'
new_name = 'q5_test'
slicer = {'gender': [1]}
copy_only = [1, 2, 3]
dataset.copy('q5', suffix, slicer=slicer, copy_only=copy_only)
        # name properly changed?
self.assertTrue('q5' in dataset.masks())
self.assertTrue(new_name in dataset.masks())
# item names updated?
items = meta['sets'][new_name]['items']
expected_items = ['columns@q5_test_1',
'columns@q5_test_2',
'columns@q5_test_3',
'columns@q5_test_4',
'columns@q5_test_5',
'columns@q5_test_6']
self.assertEqual(items, expected_items)
sources = dataset.sources(new_name)
old_items_split = [s.split('_') for s in dataset.sources('q5')]
expected_sources = ['{}_{}_{}'.format('_'.join(ois[:-1]), suffix, ois[-1])
for ois in old_items_split]
self.assertEqual(sources, expected_sources)
# lib reference properly updated?
lib_ref_mask = meta['masks'][new_name]['values']
lib_ref_items = meta['columns'][dataset.sources(new_name)[0]]['values']
expected_lib_ref = 'lib@values@q5_test'
self.assertEqual(lib_ref_mask, lib_ref_items)
self.assertEqual(lib_ref_items, expected_lib_ref)
# new parent entry correct?
parent_spec = meta['columns'][dataset.sources(new_name)[0]]['parent']
expected_parent_spec = {'masks@{}'.format(new_name): {'type': 'array'}}
self.assertEqual(parent_spec, expected_parent_spec)
# sets entries replaced?
self.assertTrue('masks@q5' in meta['sets']['data file']['items'])
self.assertTrue('masks@q5_test' in meta['sets']['data file']['items'])
self.assertTrue('q5' in meta['sets'])
self.assertTrue('q5_test' in meta['sets'])
# metadata reduced (only codes 1, 2, 3)?
self.assertTrue(dataset.codes(new_name) == copy_only)
# data sliced and reduced properly?
for s in dataset.sources('q5_test'):
self.assertTrue(set(dataset[s].dropna().unique()) == set(copy_only))
self.assertTrue(dataset[[s, 'gender']].dropna()['gender'].unique() == 1)
def test_transpose(self):
dataset = self._get_dataset(cases=500)
meta, data = dataset.split()
dataset.transpose('q5')
# new items are old values?
new_items = dataset.items('q5_trans')
old_values = dataset.values('q5')
check_old_values = [('q5_trans_{}'.format(element), text)
for element, text in old_values]
self.assertEqual(check_old_values, new_items)
# new values are former items?
new_values = dataset.value_texts('q5_trans')
old_items = dataset.item_texts('q5')
self.assertEqual(new_values, old_items)
# parent meta correctly updated?
trans_parent = meta['columns'][dataset.sources('q5_trans')[0]]['parent']
expected_parent = {'masks@q5_trans': {'type': 'array'}}
self.assertEqual(trans_parent, expected_parent)
# recoded data is correct?
original_ct = dataset.crosstab('q5', text=False)
transposed_ct = dataset.crosstab('q5_trans', text=False)
self.assertTrue(np.array_equal(original_ct.drop('All', 1, 1).T.values,
transposed_ct.drop('All', 1, 1).values))
def test_reorder_values(self):
dataset = self._get_dataset()
dataset.reorder_values('q8', [96, 1, 98, 4, 3, 2, 5])
df_vals = self.check_freq(dataset, 'q8')
df_texts = self.check_freq(dataset, 'q8', 'text')
meta = dataset.meta('q8')
df_vals_index = df_vals.index.get_level_values(1).tolist()
df_vals_index.remove('All')
df_texts_index = df_texts.index.get_level_values(1).tolist()
df_texts_index.remove('All')
# correctly indexed?
self.assertTrue(df_vals_index == meta['codes'].tolist())
self.assertTrue(df_texts_index == meta['texts'].tolist())
# correct values?
expected = [[2367.0],
[283.0],
[949.0],
[49.0],
[970.0],
[595.0],
[216.0],
[1235.0]]
self.assertEqual(df_vals.values.tolist(), expected)
def test_reorder_values_raises_on_incomplete_list(self):
dataset = self._get_dataset()
dataset.set_verbose_errmsg(False)
new_order = [3, 2, 1]
self.assertRaises(ValueError, dataset.reorder_values, 'q8', new_order)
def test_set_missings_flagging(self):
dataset = self._get_dataset()
dataset.set_missings('q8', {'exclude': [1, 2, 98, 96]})
meta = dataset.meta('q8')[['codes', 'missing']]
meta.index.name = None
meta.columns.name = None
missings = [[1, 'exclude'],
[2, 'exclude'],
[3, None],
[4, None],
[5, None],
[96, 'exclude'],
[98, 'exclude']]
expected_meta = pd.DataFrame(missings,
index=xrange(1, len(missings)+1),
columns=['codes', 'missing'])
self.assertTrue(all(meta == expected_meta))
def test_set_missings_results(self):
dataset = self._get_dataset()
dataset.set_missings('q8', {'exclude': [1, 2, 98, 96]})
df = self.check_freq(dataset, 'q8')
# check the base
base_size = df.iloc[0, 0]
expected_base_size = 1058
self.assertEqual(base_size, expected_base_size)
# check the index
index = df.index.get_level_values(1).tolist()
index.remove('All')
expected_index = [3, 4, 5]
self.assertEqual(index, expected_index)
# check categories
cat_vals = df.iloc[1:, 0].values.tolist()
expected_cat_vals = [595, 970, 1235]
self.assertEqual(cat_vals, expected_cat_vals)
def test_remove_values(self):
dataset = self._get_dataset()
dataset.remove_values('q5_1', [1, 2, 97, 98])
# removed from meta data?
expected_cat_meta = [[3, "Probably wouldn't"],
[4, 'Probably would if asked'],
[5, 'Very likely']]
self.assertEqual(dataset.meta('q5_1')[['codes', 'texts']].values.tolist(),
expected_cat_meta)
# removed from case data?
expected_cat_vals = [cat[0] for cat in expected_cat_meta]
self.assertEqual(sorted(dataset._data['q5_1'].value_counts().index.tolist()),
expected_cat_vals)
# does the engine correctly handle it?
df = self.check_freq(dataset, 'q5_1', show='text')
expected_index = [cat[1] for cat in expected_cat_meta]
df_index = df.index.get_level_values(1).tolist()
df_index.remove('All')
self.assertTrue(df_index == expected_index)
expected_results = [[5194.0],
[2598.0],
[124.0],
[2472.0]]
self.assertEqual(df.values.tolist(), expected_results)
def test_extend_values_autocodes(self):
dataset = self._get_dataset()
meta_before = dataset.meta('q8')[['codes', 'texts']]
add_values = ['CAT A', 'CAT B']
dataset.extend_values('q8', add_values)
meta_after = dataset.meta('q8')[['codes', 'texts']]
# codes are correctly selected?
expected_codes_diff = [99, 100]
codes_diff = sorted(list(set(meta_after['codes'].values)-
set(meta_before['codes'].values)))
self.assertEqual(codes_diff, expected_codes_diff)
# texts match?
expected_values_at_end = ['CAT A', 'CAT B']
self.assertEqual(meta_after['texts'].tail(2).values.tolist(),
expected_values_at_end)
def test_extend_values_usercodes(self):
dataset = self._get_dataset()
meta_before = dataset.meta('q8')[['codes', 'texts']]
add_values = [(210, 'CAT A'), (102, 'CAT B')]
dataset.extend_values('q8', add_values)
meta_after = dataset.meta('q8')[['codes', 'texts']]
# codes are correct?
expected_codes_at_end = [210, 102]
self.assertEqual(meta_after['codes'].tail(2).values.tolist(),
expected_codes_at_end)
# texts match?
expected_values_at_end = ['CAT A', 'CAT B']
self.assertEqual(meta_after['texts'].tail(2).values.tolist(),
expected_values_at_end)
def test_extend_values_no_texts(self):
dataset = self._get_dataset()
dataset.set_verbose_infomsg(False)
meta_before = dataset.meta('q8')[['codes', 'texts']]
add_values = [3001, 30002, 3003]
dataset.extend_values('q8', add_values)
meta_after = dataset.meta('q8')[['codes', 'texts']]
# codes are correct?
self.assertEqual(meta_after['codes'].tail(3).values.tolist(),
add_values)
# texts are empty?
expected_values_at_end = ['', '', '']
self.assertEqual(meta_after['texts'].tail(3).values.tolist(),
expected_values_at_end)
def test_extend_values_raises_on_dupes(self):
dataset = self._get_dataset()
add_values = [(1, 'CAT A'), (2, 'CAT B')]
self.assertRaises(ValueError, dataset.extend_values, 'q8', add_values)
def test_text_replacements_non_array(self):
dataset = self._get_dataset()
replace = {'following': 'TEST IN LABEL',
'Breakfast': 'TEST IN VALUES'}
dataset.replace_texts(replace=replace)
expected_value = 'TEST IN VALUES'
expected_label = 'Which of the TEST IN LABEL do you regularly skip?'
value_text = dataset._get_valuemap('q8', non_mapped='texts')[0]
column_text = dataset.text('q8')
self.assertEqual(column_text, expected_label)
self.assertEqual(value_text, expected_value)
def test_sorting_rules_meta(self):
dataset = self._get_dataset()
dataset.sorting('q8', fix=[3, 98, 100])
expected_rules = {'x': {'sortx': {'fixed': [3, 98],
'within': False,
'between': False,
'ascending': False,
'sort_on': '@',
'with_weight': 'auto'}},
'y': {}}
# rule correctly set?: i.e. code 100 removed from fix list since it
# does not appear in the values meta?
self.assertEqual(dataset._meta['columns']['q8']['rules'],
expected_rules)
def test_force_texts(self):
dataset = self._get_dataset()
dataset.set_value_texts(name='q4',
renamed_vals={1: 'kyllae'},
text_key='fi-FI')
dataset.force_texts(copy_to='de-DE',
copy_from=['fi-FI','en-GB'],
update_existing=False)
q4_de_val0 = dataset._meta['columns']['q4']['values'][0]['text']['de-DE']
q4_de_val1 = dataset._meta['columns']['q4']['values'][1]['text']['de-DE']
self.assertEqual(q4_de_val0, 'kyllae')
self.assertEqual(q4_de_val1, 'No')
q5_de_val0 = dataset._meta['lib']['values']['q5'][0]['text']['de-DE']
self.assertEqual(q5_de_val0, 'I would refuse if asked')
def test_validate(self):
dataset = self._get_dataset()
meta = dataset._meta
meta['columns']['q1']['values'][0]['text']['x edits'] = 'test'
meta['columns']['q1']['name'] = 'Q1'
meta['columns'].pop('q2')
meta['masks']['q5']['text'] = {'en-GB': ''}
meta['masks']['q6']['text'].pop('en-GB')
meta['columns'].pop('q6_3')
meta['columns']['q8']['text'] = ''
meta['columns']['q8']['values'][3]['text'] = ''
meta['columns']['q8']['values'] = meta['columns']['q8']['values'][0:5]
index = ['q1', 'q2', 'q5', 'q6', 'q6_1', 'q6_2', 'q6_3', 'q8']
data = {'name': ['x', '', '', '', '', '', '', '' ],
'q_label': ['', '', 'x', '', '', '', '', 'x'],
'values': ['x', '', '', '', '', '', '', 'x'],
'text keys': ['', '', '', 'x', 'x', 'x', '', 'x'],
'source': ['', '', '', 'x', '', '', '', '' ],
'codes': ['', 'x', '', '', '', '', 'x', 'x']}
df = pd.DataFrame(data, index=index)
df = df[['name', 'q_label', 'values', 'text keys', 'source', 'codes']]
df_validate = dataset.validate(False, verbose=False)
self.assertTrue(df.equals(df_validate))
def test_compare(self):
dataset = self._get_dataset()
ds = dataset.clone()
dataset.set_value_texts('q1', {2: 'test'})
dataset.set_variable_text('q8', 'test', ['en-GB', 'sv-SE'])
dataset.remove_values('q6', [1, 2])
dataset.convert('q6_3', 'delimited set')
index = ['q1', 'q6', 'q6_1', 'q6_2', 'q6_3', 'q8']
data = {'type': ['', '', '', '', 'x', ''],
'q_label': ['', '', '', '', '', 'en-GB, sv-SE, '],
'codes': ['', 'x', 'x', 'x', 'x', ''],
'value texts': ['2: en-GB, ', '', '', '', '', '']}
df = pd.DataFrame(data, index=index)
df = df[['type', 'q_label', 'codes', 'value texts']]
df_comp = dataset.compare(ds)
self.assertTrue(df.equals(df_comp))
def test_uncode(self):
dataset = self._get_dataset()
dataset.uncode('q8',{1: 1, 2:2, 5:5}, 'q8', intersect={'gender':1})
dataset.uncode('q8',{3: 3, 4:4, 98:98}, 'q8', intersect={'gender':2})
df = dataset.crosstab('q8', 'gender')
result = [[ 1797., 810., 987.],
[ 476., 0., 476.],
[ 104., 0., 104.],
[ 293., 293., 0.],
[ 507., 507., 0.],
[ 599., 0., 599.],
[ 283., 165., 118.],
[ 26., 26., 0.]]
self.assertEqual(df.values.tolist(), result)
def test_derotate_df(self):
dataset = self._get_dataset()
levels = {'visit': ['visit_1', 'visit_2', 'visit_3']}
mapper = [{'q14r{:02}'.format(r): ['q14r{0:02}c{1:02}'.format(r, c)
for c in range(1, 4)]} for r in frange('1-5')]
ds = dataset.derotate(levels, mapper, 'gender', 'record_number')
df_h = ds._data.head(10)
df_val = [[x if not np.isnan(x) else 'nan' for x in line]
for line in df_h.values.tolist()]
result_df = [[1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 8.0, 1.0, 2.0, 4.0, 2.0, 3.0, 1.0],
[1.0, 2.0, 2.0, 4.0, 4.0, 4.0, 8.0, 3.0, 3.0, 2.0, 4.0, 3.0, 1.0],
[1.0, 3.0, 1.0, 1.0, 1.0, 8.0, 'nan', 4.0, 3.0, 1.0, 3.0, 1.0, 2.0],
[1.0, 4.0, 1.0, 5.0, 5.0, 4.0, 8.0, 2.0, 3.0, 2.0, 3.0, 1.0, 1.0],
[1.0, 4.0, 2.0, 4.0, 5.0, 4.0, 8.0, 2.0, 1.0, 3.0, 2.0, 1.0, 1.0],
[1.0, 5.0, 1.0, 3.0, 3.0, 5.0, 8.0, 4.0, 2.0, 2.0, 1.0, 3.0, 1.0],
[1.0, 5.0, 2.0, 5.0, 3.0, 5.0, 8.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.0],
[1.0, 6.0, 1.0, 2.0, 2.0, 8.0, 'nan', 4.0, 2.0, 3.0, 4.0, 2.0, 1.0],
[1.0, 7.0, 1.0, 3.0, 3.0, 3.0, 8.0, 2.0, 1.0, 3.0, 2.0, 4.0, 1.0],
[1.0, 7.0, 2.0, 3.0, 3.0, 3.0, 8.0, 3.0, 2.0, 1.0, 2.0, 3.0, 1.0]]
result_columns = ['@1', 'record_number', 'visit', 'visit_levelled',
'visit_1', 'visit_2', 'visit_3', 'q14r01', 'q14r02',
'q14r03', 'q14r04', 'q14r05', 'gender']
df_len = 18520
self.assertEqual(df_val, result_df)
self.assertEqual(df_h.columns.tolist(), result_columns)
self.assertEqual(len(ds._data.index), df_len)
path_json = '{}/{}.json'.format(ds.path, ds.name)
path_csv = '{}/{}.csv'.format(ds.path, ds.name)
os.remove(path_json)
os.remove(path_csv)
def test_derotate_freq(self):
dataset = self._get_dataset()
levels = {'visit': ['visit_1', 'visit_2', 'visit_3']}
mapper = [{'q14r{:02}'.format(r): ['q14r{0:02}c{1:02}'.format(r, c)
for c in range(1, 4)]} for r in frange('1-5')]
ds = dataset.derotate(levels, mapper, 'gender', 'record_number')
val_c = {'visit': {'val': {1: 8255, 2: 6174, 3: 4091},
'index': [1, 2, 3]},
'visit_levelled': {'val': {4: 3164, 1: 3105, 5: 3094, 6: 3093, 3: 3082, 2: 2982},
'index': [4, 1, 5, 6, 3,2]},
'visit_1': {'val': {4: 3225, 6: 3136, 3: 3081, 2: 3069, 1: 3029, 5: 2980},
'index': [4, 6, 3, 2, 1, 5]},
'visit_2': {'val': {1: 2789, 6: 2775, 5: 2765, 3: 2736, 4: 2709, 2: 2665, 8: 2081},
'index': [1, 6, 5, 3, 4, 2, 8]},
'visit_3': {'val': {8: 4166, 5: 2181, 4: 2112, 3: 2067, 1: 2040, 6: 2001, 2: 1872},
'index': [8, 5, 4, 3, 1, 6, 2]},
'q14r01': {'val': {3: 4683, 1: 4653, 4: 4638, 2: 4546},
'index': [3, 1, 4, 2]},
'q14r02': {'val': {4: 4749, 2: 4622, 1: 4598, 3: 4551},
'index': [4, 2, 1, 3]},
'q14r03': {'val': {1: 4778, 4: 4643, 3: 4571, 2: 4528},
'index': [1, 4, 3, 2]},
'q14r04': {'val': {1: 4665, 2: 4658, 4: 4635, 3: 4562},
'index': [1, 2, 4, 3]},
'q14r05': {'val': {2: 4670, 4: 4642, 1: 4607, 3: 4601},
'index': [2, 4, 1, 3]},
'gender': {'val': {2: 9637, 1: 8883},
'index': [2, 1]}}
for var in val_c.keys():
series = pd.Series(val_c[var]['val'], index = val_c[var]['index'])
compare = all(series == ds._data[var].value_counts())
self.assertTrue(compare)
path_json = '{}/{}.json'.format(ds.path, ds.name)
path_csv = '{}/{}.csv'.format(ds.path, ds.name)
os.remove(path_json)
os.remove(path_csv)
def test_derotate_meta(self):
dataset = self._get_dataset()
levels = {'visit': ['visit_1', 'visit_2', 'visit_3']}
mapper = [{'q14r{:02}'.format(r): ['q14r{0:02}c{1:02}'.format(r, c)
for c in range(1, 4)]} for r in frange('1-5')]
ds = dataset.derotate(levels, mapper, 'gender', 'record_number')
err = ds.validate(False, False)
err_s = None
self.assertEqual(err_s, err)
path_json = '{}/{}.json'.format(ds.path, ds.name)
path_csv = '{}/{}.csv'.format(ds.path, ds.name)
os.remove(path_json)
os.remove(path_csv)
def test_interlock(self):
dataset = self._get_dataset()
data = dataset._data
name, lab = 'q4AgeGen', 'q4 Age Gender'
variables = ['q4',
{'age': [(1, '18-35', {'age': frange('18-35')}),
(2, '30-49', {'age': frange('30-49')}),
(3, '50+', {'age': is_ge(50)})]},
'gender']
dataset.interlock(name, lab, variables)
val = [1367,1109,1036,831,736,579,571,550,454,438,340,244]
ind = ['10;','8;','9;','7;','3;','8;10;','1;','4;','2;','7;9;','1;3;','2;4;']
s = pd.Series(val, index=ind, name='q4AgeGen')
self.assertTrue(all(s==data['q4AgeGen'].value_counts()))
values = [(1, u'Yes/18-35/Male'),
(2, u'Yes/18-35/Female'),
(3, u'Yes/30-49/Male'),
(4, u'Yes/30-49/Female'),
(5, u'Yes/50+/Male'),
(6, u'Yes/50+/Female'),
(7, u'No/18-35/Male'),
(8, u'No/18-35/Female'),
(9, u'No/30-49/Male'),
(10, u'No/30-49/Female'),
(11, u'No/50+/Male'),
(12, u'No/50+/Female')]
text = 'q4 Age Gender'
self.assertEqual(values, dataset.values('q4AgeGen'))
self.assertEqual(text, dataset.text('q4AgeGen'))
self.assertTrue(dataset.is_delimited_set('q4AgeGen'))
def test_dichotomous_to_delimited_set(self):
dataset = self._get_dataset()
dataset.dichotomize('q8', None, False)
dataset.to_delimited_set('q8_new', dataset.text('q8'),
['q8_1', 'q8_2', 'q8_3', 'q8_4', 'q8_5', 'q8_96', 'q8_98'],
from_dichotomous=True, codes_from_name=True)
self.assertEqual(dataset.values('q8'), dataset.values('q8_new'))
self.assertEqual(dataset['q8'].value_counts().values.tolist(),
dataset['q8_new'].value_counts().values.tolist())
self.assertRaises(ValueError, dataset.to_delimited_set, 'q8_new', '', ['age', 'gender'])
def test_categorical_to_delimited_set(self):
dataset = self._get_dataset()
self.assertRaises(ValueError, dataset.to_delimited_set, 'q1_1', '', ['q1', 'q2'])
dataset.to_delimited_set('q5_new',
dataset.text('q5'),
dataset.sources('q5'),
False)
self.assertEqual(dataset.crosstab('q5_new').values.tolist(),
[[8255.0], [3185.0], [2546.0], [4907.0],
[287.0], [3907.0], [1005.0], [3640.0]])
for v in dataset.sources('q5'):
self.assertEqual(dataset.values('q5_new'), dataset.values(v))
def test_get_value_texts(self):
dataset = self._get_dataset()
values = [(1, u'Regularly'), (2, u'Irregularly'), (3, u'Never')]
self.assertEqual(values, dataset.values('q2b', 'en-GB'))
dataset._meta['columns']['q2b']['values'][0]['text']['x edits'] = {'en-GB': 'test'}
value_texts = ['test', None, None]
self.assertEqual(value_texts, dataset.value_texts('q2b', 'en-GB', 'x'))
def test_get_item_texts(self):
dataset = self._get_dataset()
items = [(u'q6_1', u'Exercise alone'),
(u'q6_2', u'Join an exercise class'),
(u'q6_3', u'Play any kind of team sport')]
self.assertEqual(items, dataset.items('q6', 'en-GB'))
dataset._meta['masks']['q6']['items'][2]['text']['x edits'] = {'en-GB': 'test'}
item_texts = [None, None, 'test']
self.assertEqual(item_texts, dataset.item_texts('q6', 'en-GB', 'x'))
def test_get_variable_text(self):
dataset = self._get_dataset()
text = 'How often do you take part in any of the following? - Exercise alone'
self.assertEqual(text, dataset.text('q6_1', False, 'en-GB'))
text = 'Exercise alone'
self.assertEqual(text, dataset.text('q6_1', True, 'en-GB'))
text = ''
self.assertEqual(text, dataset.text('q6_1', True, 'en-GB', 'x'))
def test_set_value_texts(self):
dataset = self._get_dataset()
values = [{u'text': {u'en-GB': u'Strongly disagree'}, u'value': 1},
{u'text': {u'en-GB': 'test1'}, u'value': 2},
{u'text': {u'en-GB': u'Neither agree nor disagree'}, u'value': 3},
{u'text': {u'en-GB': u'Agree', 'y edits': {'en-GB': 'test2'}}, u'value': 4},
{u'text': {u'en-GB': u'Strongly agree'}, u'value': 5}]
dataset.set_value_texts('q14_1', {2: 'test1'}, 'en-GB')
dataset.set_value_texts('q14_1', {4: 'test2'}, 'en-GB', 'y')
value_obj = dataset._meta['lib']['values']['q14_1']
self.assertEqual(value_obj, values)
values = [{u'text': {u'en-GB': u'test1'}, u'value': 1},
{u'text': {u'en-GB': u'Irregularly'}, u'value': 2},
{u'text': {u'en-GB': u'Never',
u'y edits': {'en-GB': 'test2'},
u'x edits': {'en-GB': 'test2'}}, u'value': 3}]
dataset.set_value_texts('q2b', {1: 'test1'}, 'en-GB')
dataset.set_value_texts('q2b', {3: 'test2'}, 'en-GB', ['x', 'y'])
value_obj = dataset._meta['columns']['q2b']['values']
self.assertEqual(value_obj, values)
def test_set_item_texts(self):
dataset = self._get_dataset()
items = [{u'en-GB': u'Exercise alone'},
{u'en-GB': u'Join an exercise class',
'sv-SE': 'test1',
'x edits': {'sv-SE': 'test', 'en-GB': 'test'}},
{u'en-GB': u'Play any kind of team sport',
'sv-SE': 'test2'}]
dataset.set_item_texts('q6', {2: 'test1', 3: 'test2'}, 'sv-SE')
dataset.set_item_texts('q6', {2: 'test'}, ['en-GB', 'sv-SE'], 'x')
item_obj = [i['text'] for i in dataset._meta['masks']['q6']['items']]
self.assertEqual(item_obj, items)
def test_set_variable_text(self):
dataset = self._get_dataset()
text = {'en-GB': 'new text', 'sv-SE': 'new text'}
dataset.set_variable_text('q6', 'new text', ['en-GB', 'sv-SE'])
dataset.set_variable_text('q6', 'new', ['da-DK'], 'x')
text_obj = dataset._meta['masks']['q6']['text']
self.assertEqual(text_obj, text)
text = {'en-GB': 'What is your main fitness activity?',
'x edits': {'en-GB': 'edit'}, 'y edits':{'en-GB': 'edit'}}
dataset.set_variable_text('q1', 'edit', 'en-GB', ['x', 'y'])
def test_crosstab(self):
x = 'q14r01c01'
dataset = self._get_dataset()
dataset.crosstab(x)
self.assertEqual(dataset._meta['columns'][x]['values'],
'lib@values@q14_1')
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/plotly/matplotlylib/mplexporter/renderers/vincent_renderer.py | 64 | 1922 | import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
def open_figure(self, fig, props):
self.chart = None
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
def draw_line(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
linedata = {'x': data[:, 0],
'y': data[:, 1]}
line = vincent.Line(linedata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
line.scales['color'].range = [style['color']]
if self.chart is None:
self.chart = line
else:
warnings.warn("Multiple plot elements not yet supported")
def draw_markers(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
markerdata = {'x': data[:, 0],
'y': data[:, 1]}
markers = vincent.Scatter(markerdata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
markers.scales['color'].range = [style['facecolor']]
if self.chart is None:
self.chart = markers
else:
warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
"""Convert a matplotlib figure to a vincent object"""
renderer = VincentRenderer()
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.chart
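if __name__ == "__main__":
    # Minimal usage sketch (not part of the mplexporter API): convert a small
    # matplotlib figure and write the resulting Vega spec to a JSON file.
    # Assumes matplotlib and vincent are installed; the output filename below
    # is only an example.
    import matplotlib
    matplotlib.use("Agg")  # render off-screen so no display is required
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    chart = fig_to_vincent(fig)
    chart.to_json("vincent_chart.json")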
| apache-2.0 |
jiegec/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
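            # each tone is shifted by a multiple of the channel width (fs) so
            # that every output channel receives exactly one signal, offset by
            # freqs[i] from that channel's centre frequency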
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
amolsharma99/Implementing-Anomaly-Detection | outlier_detection.py | 1 | 2871 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
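# (nu for the One-Class SVM is derived from the expected outlier fraction,
#  with a small 0.05 margin added; EllipticEnvelope uses a fixed contamination)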
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    # randn expects integer dimensions, so cast the inlier count explicitly
    X1 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) - offset
    X2 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show() | mit |
alexjc/pylearn2 | pylearn2/cross_validation/tests/test_subset_iterators.py | 49 | 2411 | """
Test subset iterators.
"""
import numpy as np
from pylearn2.testing.skip import skip_if_no_sklearn
def test_validation_k_fold():
"""Test ValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import ValidationKFold
n = 30
# test with indices
cv = ValidationKFold(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
def test_stratified_validation_k_fold():
"""Test StratifiedValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationKFold)
n = 30
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationKFold(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
assert np.count_nonzero(y[valid]) == (n / 2) * (1. / cv.n_folds)
assert np.count_nonzero(y[test]) == (n / 2) * (1. / cv.n_folds)
def test_validation_shuffle_split():
"""Test ValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
ValidationShuffleSplit)
n = 30
# test with indices
cv = ValidationShuffleSplit(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
def test_stratified_validation_shuffle_split():
"""Test StratifiedValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationShuffleSplit)
n = 60
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationShuffleSplit(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
assert np.count_nonzero(y[valid]) == (n / 2) * cv.test_size
assert np.count_nonzero(y[test]) == (n / 2) * cv.test_size
| bsd-3-clause |
gkunter/coquery | coquery/installer/coq_install_icle.py | 1 | 17328 | # -*- coding: utf-8 -*-
"""
coq_install_icle.py is part of Coquery.
Copyright (c) 2016 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import re
import string
import pandas as pd
from coquery.corpusbuilder import *
from coquery.unicode import utf8
from coquery.bibliography import *
"""
Right now, there's no straightforward way to make this installer work cross-
platform because the corpus uses the MS Access file format .mdb for the
meta data. These files can be read in Linux if the mdbtools are installed
first. Also, the meza module (available via PyPi) is required. The meta data
can be extracted like so:
In [1]: from meza import io
In [2]: import pandas as pd
In [3]: records = io.read(
"/usr/local/share/corpora/source/ICLE/DATA/_icle.mdb")
In [4]: lst = list(records)
In [5]: df = pd.DataFrame(lst)
In [6]: df = df.dropna(axis="index", how="all")
In [7]: df = df.loc[df["country"].notna()]
In [8]: df["monthseng"] = (df["monthseng"].replace("None", pd.np.nan)
.astype(float))
In [9]: df["unieng"] = (df["unieng"].replace("None", pd.np.nan)
.astype(float))
In [10]: df["filename"] = df["file"] + ".txt"
In [11]: df.to_csv(
"/usr/local/share/corpora/source/ICLE/meta.csv", index=False)
Now, the ICLE can be built from the .txt files with the meta.csv file just
created as the meta data file. What is important is that the .txt files are
stored in a separate folder – on the CD, they are placed in the DATA folder
alongside some binaries which will confuse the corpus builder.
"""
class BuilderClass(BaseCorpusBuilder):
file_filter = "*.*"
word_table = "Lexicon"
word_id = "WordId"
word_label = "Word"
word_lemma = "Lemma"
word_pos = "POS"
word_claws = "CLAWS"
corpus_table = "Corpus"
corpus_id = "ID"
corpus_word_id = "WordId"
corpus_source_id = "TextId"
corpus_file_id = "FileId"
corpus_speaker_id = "SpeakerId"
source_table = "Texts"
source_id = "TextId"
source_batch = "Batch"
source_title = "Title"
source_type = "Type"
source_condition = "Conditions"
source_reftool = "Reference_tools"
source_exam = "Examination"
source_status = "Status"
source_institute = "Institute"
source_comment = "Comments"
speaker_table = "Speakers"
speaker_id = "SpeakerId"
speaker_age = "Age"
speaker_sex = "Gender"
speaker_country = "Country"
speaker_language = "Native_language"
speaker_schoolenglish = "Years_English_at_school"
speaker_unienglish = "Years_English_at_university"
speaker_abroadenglish = "Months_English_speaking_country"
speaker_otherlang1 = "Other_language_1"
speaker_otherlang2 = "Other_language_2"
speaker_otherlang3 = "Other_language_3"
file_table = "Files"
file_id = "FileId"
file_name = "Filename"
file_path = "Path"
special_files = ["source_info.csv", "tokens.txt"]
special_files = ["tokens.txt"]
expected_files = special_files + ["BGSU1003.txt", "CNHK1052.txt"]
def __init__(self, gui=False, *args):
# all corpus builders have to call the inherited __init__ function:
super(BuilderClass, self).__init__(gui, *args)
self.create_table_description(self.file_table,
[Identifier(self.file_id, "MEDIUMINT(7) UNSIGNED NOT NULL"),
Column(self.file_name, "TINYTEXT NOT NULL"),
Column(self.file_path, "TINYTEXT NOT NULL")])
self.create_table_description(self.speaker_table,
[Identifier(self.speaker_id, "MEDIUMINT(4) UNSIGNED NOT NULL"),
Column(self.speaker_age, "TINYINT(2) UNSIGNED"),
Column(self.speaker_sex, "ENUM('Female','Male','Unknown') NOT NULL"),
Column(self.speaker_country, "VARCHAR(15) NOT NULL"),
Column(self.speaker_language, "VARCHAR(17) NOT NULL"),
Column(self.speaker_schoolenglish, "TINYINT(2) UNSIGNED"),
Column(self.speaker_unienglish, "DECIMAL(1,1) UNSIGNED"),
Column(self.speaker_abroadenglish, "DECIMAL(3,2) UNSIGNED"),
Column(self.speaker_otherlang1, "VARCHAR(16)"),
Column(self.speaker_otherlang2, "VARCHAR(16)"),
Column(self.speaker_otherlang3, "VARCHAR(9)"),
])
self.create_table_description(self.word_table,
[Identifier(self.word_id, "MEDIUMINT(5) UNSIGNED NOT NULL"),
Column(self.word_label, "VARCHAR(60) NOT NULL"),
Column(self.word_lemma, "VARCHAR(30) NOT NULL"),
Column(self.word_pos, "VARCHAR(10) NOT NULL"),
Column(self.word_claws, "VARCHAR(10) NOT NULL")])
self.create_table_description(self.source_table,
[Identifier(self.source_id, "MEDIUMINT(4) UNSIGNED NOT NULL"),
Column(self.source_batch, "CHAR(19) NOT NULL"),
Column(self.source_title, "VARCHAR(434) NOT NULL"),
Column(self.source_type, "ENUM('Argumentative','Literary','Other') NOT NULL"),
Column(self.source_condition, "ENUM('No Timing','Timed','Unknown') NOT NULL"),
Column(self.source_reftool, "ENUM('Yes','No','Unknown') NOT NULL"),
Column(self.source_exam, "ENUM('Yes','No','Unknown') NOT NULL"),
Column(self.source_status, "ENUM('Complete','Incomplete') NOT NULL"),
Column(self.source_institute, "VARCHAR(88) NOT NULL"),
Column(self.source_comment, "VARCHAR(141) NOT NULL"),
])
self.create_table_description(self.corpus_table,
[Identifier(self.corpus_id, "MEDIUMINT(7) UNSIGNED NOT NULL"),
Link(self.corpus_word_id, self.word_table),
Link(self.corpus_file_id, self.file_table),
Link(self.corpus_source_id, self.source_table),
Link(self.corpus_speaker_id, self.speaker_table)])
self._sources = {}
self._speakers = {}
self._words = {}
@staticmethod
def get_name():
return "ICLE"
@staticmethod
def get_db_name():
return "coq_icle"
@staticmethod
def get_title():
return "The International Corpus of Learner English"
@staticmethod
def get_language():
return "English"
@staticmethod
def get_language_code():
return "en-L2"
@staticmethod
def get_description():
return [
"The International Corpus of Learner English contains argumentative essays written by higher intermediate to advanced learners of English from several mother tongue backgrounds (Bulgarian, Chinese, Czech, Dutch, Finnish, French, German, Italian, Japanese, Norwegian, Polish, Russian, Spanish, Swedish, Tswana, Turkish). The corpus is the result of collaboration with a wide range of partner universities internationally. The first version was published on CD-ROM in 2002, and an expanded version, ICLEv2, was published in 2009. The corpus is highly homogeneous as all partners have adopted the same corpus collection guidelines."]
@staticmethod
def get_references():
return [InCollection(
authors=PersonList(Person(first="Sylviane", last="Granger")),
year=2008,
contributiontitle="Learner corpora",
editors=PersonList(
Person(first="Anke", last="Lüdeling"),
Person(first="Merja", last="Kytö")),
title="Handbook on corpus linguistics",
publisher="Mouton de Gruyter",
address="Berlin")]
@staticmethod
def get_url():
return "https://www.uclouvain.be/en-cecl-icle.html"
@staticmethod
def get_license():
return "The ICLE is available under the terms of a commercial license.</a>."
@classmethod
def get_file_list(cls, *args, **kwargs):
"""
        Make sure that the special files appear first in the file list.
"""
l = super(BuilderClass, cls).get_file_list(*args, **kwargs)
for x in list(l):
if os.path.basename(x) in cls.special_files:
l.remove(x)
l.insert(0, x)
return l
@staticmethod
def _filename_to_batch(filename):
"""
This method should return a string that matches the 'batch' label.
"""
#{"BGSU": "ICLE-BG-SUN",
#"CNHK": "ICLE-CN-HKU",
#"CNUK": "ICLE-CN-UK",
#"CZKR": "ICLE-CZ-KRAL",
#"DBAN": "ICLE-DB-KVH",
#"CZKR": "ICLE-CZ-PRAG",
#if filename.startswith("BGSU"):
#id_str =
#elif filename.startswith("CNHK"):
#id_str =
#elif filename.startswith("CNUK"):
#id_str = "ICLE-CN-UK"
#return "ICLE
return filename
def process_file(self, filename):
base_name = os.path.basename(filename)
print(base_name)
if base_name == "source_info.csv":
df = pd.read_csv(filename)
for i in df.index:
row = df.loc[i]
if i == 1:
print(row)
print(row.dtypes)
if row.schooleng == -1:
row.schooleng = None
if row.unieng == -1:
row.unieng = None
if row.monthseng == -1:
row.monthseng = None
if row.age == -1:
row.age = None
self._sources[utf8(row.file)] = (
self.table(self.source_table).add(
{self.source_batch: self._filename_to_batch(row.file),
self.source_title: utf8(row.title),
self.source_type: row.type,
self.source_condition: row.conditions,
self.source_reftool: row.reftools,
self.source_exam: row.exam,
self.source_status: row.status,
self.source_institute: utf8(row.instit2),
self.source_comment: utf8(row.comments)}))
self._speakers[utf8(row.file)] = (
self.table(self.speaker_table).add(
{self.speaker_age: row.age,
self.speaker_sex: row.sex,
self.speaker_country: row.country,
self.speaker_language: row.llanguage,
self.speaker_schoolenglish: row.schooleng,
self.speaker_unienglish: row.unieng,
self.speaker_abroadenglish: row.monthseng,
self.speaker_otherlang1: row.olang1,
self.speaker_otherlang2: row.olang2,
self.speaker_otherlang3: row.olang3}))
elif base_name == "tokens.txt":
hold_back = []
with codecs.open(filename, "r", encoding="utf-16") as input_file:
_ = input_file.read()
for row in input_file:
#match = re.match("{(.+),(.+)\.(\w+)\+(.+)}", row)
match = re.match("{(.+),(.+)\.(.+)}", row)
if match:
label = match.group(1)
pos = match.group(3)
if "+" in pos:
pos, _, claws = pos.partition("+")
else:
claws = pos
d = {
self.word_label: label,
self.word_lemma: match.group(2),
self.word_pos: pos,
self.word_claws: claws}
if label not in self._words:
self._words[label] = self.table(self.word_table).add(d)
else:
hold_back.append(row.strip())
for row in hold_back:
if row not in self._words:
if row in string.punctuation:
d = {
self.word_label: row,
self.word_lemma: row,
self.word_pos: "PUNCT",
self.word_claws: "PUNCT"}
else:
d = {self.word_label: row,
self.word_lemma: row.lower(),
self.word_pos: "UNKNOWN",
self.word_claws: "UNKNOWN"}
self._words[row] = self.table(self.word_table).add(d)
elif base_name in self.expected_files:
self._source_id = self._sources[base_name.partition(".")[0]]
self._speaker_id = self._speakers[base_name.partition(".")[0]]
d = {self.corpus_file_id: self._file_id,
self.corpus_source_id: self._source_id,
self.corpus_speaker_id: self._speaker_id}
with codecs.open(filename, "r") as input_file:
batch = None
for row in input_file:
if batch is None:
"""
process batch name
"""
batch = row
else:
words = row.split()
for word in [x.strip() for x in words]:
# handle any word-initial punctuation:
while word and word[0] in string.punctuation:
d[self.corpus_word_id] = self.table(self.word_table).get_or_insert(
{self.word_label: word[0],
self.word_lemma: word[0],
self.word_pos: "PUNCT",
self.word_claws: "PUNCT"}, case=True)
self.add_token_to_corpus(dict(d))
word = word[1:]
# construct word, taking punctuation and escaped
# punctuation into account:
l = []
escaped = True
for ch in word:
# add escaped characters to the word:
if escaped:
l.append(ch)
escaped = False
continue
# take note of escaping:
if ch == "\\":
escaped = True
continue
# add any non-punctuation character to word:
if ch not in string.punctuation:
l.append(ch)
continue
# current character is a punctuation mark.
# store the current word:
if l:
w = "".join(l)
d[self.corpus_word_id] = self.table(self.word_table).get_or_insert(
{self.word_label: w,
self.word_lemma: w.lower(),
self.word_pos: "UNKNOWN",
self.word_claws: "UNKNOWN"}, case=True)
self.add_token_to_corpus(dict(d))
l = []
# add any following punctuation marks as
# punctuation tokens:
d[self.corpus_word_id] = self.table(self.word_table).get_or_insert(
{self.word_label: ch,
self.word_lemma: ch,
self.word_pos: "PUNCT",
self.word_claws: "PUNCT"}, case=True)
self.add_token_to_corpus(dict(d))
# make sure that the current word is added:
if l:
w = "".join(l)
d[self.corpus_word_id] = self.table(self.word_table).get_or_insert(
{self.word_label: w,
self.word_lemma: w.lower(),
self.word_pos: "UNKNOWN",
self.word_claws: "UNKNOWN"}, case=True)
self.add_token_to_corpus(dict(d))
def store_filename(self, file_name):
if os.path.basename(file_name) not in self.special_files:
super(BuilderClass, self).store_filename(file_name)
if __name__ == "__main__":
BuilderClass().build()
| gpl-3.0 |
dongjoon-hyun/spark | python/pyspark/sql/tests/test_pandas_cogrouped_map.py | 20 | 9306 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import array, explode, col, lit, udf, pandas_udf
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class CogroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data1(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks')))\
.withColumn("v", col('k') * 10)\
.drop('ks')
@property
def data2(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks'))) \
.withColumn("v2", col('k') * 100) \
.drop('ks')
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn('v3', lit('a'))
self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({
'id': [1, 2, 3],
'k': [5, 6, 7],
'v': [9, 10, 11]
})
right = pd.DataFrame.from_dict({
'id': [11, 12, 13],
'k': [5, 6, 7],
'v2': [90, 100, 110]
})
left_gdf = self.spark\
.createDataFrame(left)\
.groupby(col('id') % 2 == 0)
right_gdf = self.spark \
.createDataFrame(right) \
.groupby(col('id') % 2 == 0)
def merge_pandas(l, r):
return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])
result = left_gdf \
.cogroup(right_gdf) \
.applyInPandas(merge_pandas, 'k long, v long, v2 long') \
.sort(['k']) \
.toPandas()
expected = pd.DataFrame.from_dict({
'k': [5, 6, 7],
'v': [9, 10, 11],
'v2': [90, 100, 110]
})
assert_frame_equal(expected, result)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left.groupby().cogroup(right.groupby())\
.applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby().cogroup(df.groupby()) \
.applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
'sum1 int, sum2 int').collect()
self.assertEqual(result[0]['sum1'], 165)
self.assertEqual(result[0]['sum2'], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, l, _):
return l.assign(key=key[0])
result = self.data1 \
.groupby(col('id') % 2 == 0)\
.cogroup(self.data2.groupby(col('id') % 2 == 0)) \
.applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
.sort(['id', 'k']) \
.toPandas()
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result)
def test_wrong_return_type(self):
        # Test that we get a sensible exception when invalid values are passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegex(
NotImplementedError,
'Invalid return type.*ArrayType.*TimestampType'):
left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
lambda l, r: l, 'id long, v array<timestamp>')
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegex(ValueError, 'Invalid function'):
left.groupby('id').cogroup(right.groupby('id')) \
.applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df1.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df2.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
def test_self_join(self):
# SPARK-34319: self-join with FlatMapCoGroupsInPandas
df = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df.groupby("ColUmn").cogroup(
df.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long")
row = row.join(row).first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
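    # The two helpers below capture the cogrouped-map pattern exercised by the
    # tests above: df1.groupby(key).cogroup(df2.groupby(key)).applyInPandas(func, schema),
    # where `func` receives one pandas.DataFrame per side (optionally preceded by
    # the group key) and must return a pandas.DataFrame matching `schema`.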
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, l, r):
return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
.toPandas()
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result)
@staticmethod
def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(merge_pandas, output_schema)\
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_cogrouped_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
macks22/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
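# (like Xtrain above, the grid is converted from degrees to radians because the
# haversine metric used by KernelDensity operates on angles in radians)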
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
YerevaNN/mimic3-benchmarks | mimic3benchmark/scripts/create_in_hospital_mortality.py | 1 | 3320 | from __future__ import absolute_import
from __future__ import print_function
import os
import argparse
import pandas as pd
import random
random.seed(49297)
from tqdm import tqdm
def process_partition(args, partition, eps=1e-6, n_hours=48):
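    """Write, for each ICU stay in `partition`, the events from the first
    `n_hours` hours to a per-stay CSV and collect (stay, mortality) pairs
    into a listfile for the in-hospital mortality task."""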
output_dir = os.path.join(args.output_path, partition)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
xy_pairs = []
patients = list(filter(str.isdigit, os.listdir(os.path.join(args.root_path, partition))))
for patient in tqdm(patients, desc='Iterating over patients in {}'.format(partition)):
patient_folder = os.path.join(args.root_path, partition, patient)
patient_ts_files = list(filter(lambda x: x.find("timeseries") != -1, os.listdir(patient_folder)))
for ts_filename in patient_ts_files:
with open(os.path.join(patient_folder, ts_filename)) as tsfile:
lb_filename = ts_filename.replace("_timeseries", "")
label_df = pd.read_csv(os.path.join(patient_folder, lb_filename))
# empty label file
if label_df.shape[0] == 0:
continue
mortality = int(label_df.iloc[0]["Mortality"])
los = 24.0 * label_df.iloc[0]['Length of Stay'] # in hours
if pd.isnull(los):
print("\n\t(length of stay is missing)", patient, ts_filename)
continue
if los < n_hours - eps:
continue
ts_lines = tsfile.readlines()
header = ts_lines[0]
ts_lines = ts_lines[1:]
event_times = [float(line.split(',')[0]) for line in ts_lines]
ts_lines = [line for (line, t) in zip(ts_lines, event_times)
if -eps < t < n_hours + eps]
# no measurements in ICU
if len(ts_lines) == 0:
print("\n\t(no events in ICU) ", patient, ts_filename)
continue
output_ts_filename = patient + "_" + ts_filename
with open(os.path.join(output_dir, output_ts_filename), "w") as outfile:
outfile.write(header)
for line in ts_lines:
outfile.write(line)
xy_pairs.append((output_ts_filename, mortality))
print("Number of created samples:", len(xy_pairs))
if partition == "train":
random.shuffle(xy_pairs)
if partition == "test":
xy_pairs = sorted(xy_pairs)
with open(os.path.join(output_dir, "listfile.csv"), "w") as listfile:
listfile.write('stay,y_true\n')
for (x, y) in xy_pairs:
listfile.write('{},{:d}\n'.format(x, y))
def main():
parser = argparse.ArgumentParser(description="Create data for in-hospital mortality prediction task.")
parser.add_argument('root_path', type=str, help="Path to root folder containing train and test sets.")
parser.add_argument('output_path', type=str, help="Directory where the created data should be stored.")
args, _ = parser.parse_known_args()
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
process_partition(args, "test")
process_partition(args, "train")
if __name__ == '__main__':
main()
| mit |
gzucolotto/PrimeNumbersPlotting | StatisticCollector.py | 1 | 1674 | import time
import matplotlib.pyplot as plt
import math
counter = {}
times = {}
lables = {}
stored_series = {}
stored_times = {}
stored_labels = {}
def method_decorator(method):
'''
    Decorator that counts method invocations and accumulates their execution times.
'''
def wrapper(*args, **kwargs):
key = method
if counter.__contains__(key) is False:
counter[key] = 0
times[key] = 0
lables[key] = args[0].__module__
init_time = time.time()
response = method(*args, **kwargs)
total_time = time.time() - init_time
counter[key] += 1
times[key] += total_time
return response
return wrapper
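# A minimal usage sketch (the Worker class here is hypothetical, not part of this module):
#
#     class Worker(object):
#         @method_decorator
#         def compute(self, n):
#             return n * n
#
#     Worker().compute(3)   # counted and timed by the wrapper above
#     store_iteration()     # move the per-iteration counters into the stored series
#     generate_report()     # plot the stored invocation counts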
def reset_iteration_counters():
'''
Resets the iteration counters.
'''
counter.clear()
times.clear()
lables.clear()
def reset_counters():
'''
Resets all counters.
'''
counter.clear()
times.clear()
lables.clear()
stored_series.clear()
stored_times.clear()
stored_labels.clear()
def store_iteration():
'''
    Stores the current iteration: appends the iteration counters to the stored counters and resets the iteration counters.
'''
for key, value in counter.iteritems():
if stored_series.__contains__(key) is False:
stored_series[key] = []
stored_times[key] = []
stored_labels[key] = lables[key]
stored_series[key].append(value)
stored_times[key].append(times[key])
reset_iteration_counters()
def generate_report(filename="nroInteration.png"):
'''
Generates a report with stored invocation counters.
'''
plt.figure()
x_series = range(2, stored_series.items()[0][1].__len__() + 2)
for key, series in stored_series.iteritems():
plt.plot(x_series, series, label=stored_labels[key])
plt.legend(loc="upper left")
plt.grid(True)
plt.savefig(filename) | apache-2.0 |
exepulveda/swfc | python/clustering_kmean_ds4.py | 1 | 2282 | import numpy as np
import pickle
import logging
import argparse
import csv
import matplotlib as mpl
mpl.use('agg')
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from cluster_utils import create_clusters_dict, recode_categorical_values
from plotting import scatter_clusters
import matplotlib.pyplot as plt
import clusteringlib as cl
from case_study_2d import attributes,setup_case_study,setup_distances
if __name__ == "__main__":
filename = 'ds4'
X = np.loadtxt("../data/{dataset}.csv".format(dataset=filename),skiprows=1,delimiter=",")
locations = X[:,0:2]
data = X[:,2:6] #0,1,2,5 are continues
true_clusters = X[:,6]
N,ND = data.shape
var_types = np.ones(ND)
seed = 1634120
np.random.seed(seed)
standadize = StandardScaler()
data_scaled = standadize.fit_transform(data)
scale = standadize.scale_
data_F = np.asfortranarray(data,dtype=np.float32)
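    # clusteringlib appears to be a compiled (Fortran-style) extension, hence the
    # Fortran-ordered float32 copies of the data, centroids and weights passed to it.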
for NC in range(2,11):
clustering = KMeans(n_clusters=NC)
kmeans_clusters = clustering.fit_predict(data_scaled)
sc = silhouette_score(data_scaled,kmeans_clusters)
#save data
#new_data = np.c_[locations,kmeans_clusters_all]
#np.savetxt("../results/{dataset}_clusters_kmeans_{nc}.csv".format(dataset=filename,nc=NC),new_data,delimiter=",",fmt="%.4f")
centroids_F = np.asfortranarray(np.empty((NC,ND)),dtype=np.float32)
for k in range(NC):
indices = np.where(kmeans_clusters == k)[0]
centroids_F[k,:] = np.mean(data[indices,:],axis=0)
#stats
cl.distances.sk_setup(np.asfortranarray(np.float32(scale)))
cl.distances.set_variables(np.asfortranarray(np.int32(var_types)),False)
#KMeans
weights = np.asfortranarray(np.ones((NC,ND),dtype=np.float32)/ ND)
clusters = np.asfortranarray(kmeans_clusters,dtype=np.int8)
ret_kmeans = cl.clustering.dbi_index(centroids_F,data_F,clusters,weights)
ret_sill= cl.clustering.silhouette_index(data_F,clusters,weights)
print("KMeans DB Index:",NC,ret_kmeans,ret_sill,sc)
cl.distances.reset()
| gpl-3.0 |
cliu3/pf_geolocation | postprocessing/post_process_gpu.py | 1 | 3654 | from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import scipy.io
import scipy.stats
from astropy.time import Time
import pandas as pd
from my_project import *
import os.path
#from config import *
import sys
#tagid = 7
# try:
tagid = int(sys.argv[1])
#tagid = 12
# except:
# load tag file
path_to_tags = '/home/cliu3/pf_geolocation/data/tag_files'
tag=scipy.io.loadmat(path_to_tags+'/'+str(tagid)+'_raw.mat',squeeze_me =False,struct_as_record=True)
tag=tag['tag'][0,0]
release_lon = tag['release_lon'][0,0]
release_lat = tag['release_lat'][0,0]
[release_x, release_y] = my_project(release_lon, release_lat, 'forward')
recapture_lon = tag['recapture_lon'][0,0]
recapture_lat = tag['recapture_lat'][0,0]
[recapture_x, recapture_y] = my_project(recapture_lon, recapture_lat, 'forward')
tagname = str(tagid)+'_'+tag['tag_id'][0]
# load result file
result = scipy.io.loadmat('result'+tagname+'.mat',squeeze_me =False,struct_as_record=True)
particles = result['particles']
mpt_idx = result['mpt_idx']
# determine most probable track
mpt_x = particles[:,mpt_idx,0].flatten()
mpt_y = particles[:,mpt_idx,1].flatten()
(mpt_lon, mpt_lat) = my_project(mpt_x, mpt_y, 'reverse')
day_dnum = np.array(range(int(tag['dnum'][0]), int(tag['dnum'][-1])+1))
date = Time(day_dnum-678942,format='mjd',scale='utc').datetime
MPT = pd.DataFrame({'date':date, 'lon':mpt_lon, 'lat':mpt_lat, 'X':mpt_x, 'Y':mpt_y})
MPT['date'] = pd.to_datetime(MPT['date'])
MPT = MPT[['date', 'X', 'Y', 'lat', 'lon']]
MPT.to_csv('mpt_'+tagname+'.csv')
#-- calculate cumulative probability distribution
# construct the probability distribution using kernel density estimation
xmin = particles[:,:,0].min()
xmax = particles[:,:,0].max()
ymin = particles[:,:,1].min()
ymax = particles[:,:,1].max()
X, Y = np.meshgrid(np.linspace(xmin,xmax,50), np.linspace(ymin,ymax,50))
positions = np.vstack([X.ravel(), Y.ravel()])
ndays = len(particles)
udist = np.zeros_like(X)
# for i in range(ndays):
# print("Processing kde for Day "+str(i+1)+"/"+str(ndays)+"...")
# values = particles[i].T
# kernel = scipy.stats.gaussian_kde(values)
# Z = np.reshape(kernel(positions).T, X.shape)
# Z = Z/Z.max()
# udist += Z
print("Processing kde...")
values = np.vstack([particles[:,:,0].flatten(), particles[:,:,1].flatten()])
kernel = scipy.stats.gaussian_kde(values)
udist = np.reshape(kernel(positions).T, X.shape)
udist = udist/udist.max()
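# udist pools the particles from every day into a single KDE surface, rescaled
# so that its maximum value is 1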
scipy.io.savemat('UD_'+tagname+'.mat',{'X':X, 'Y':Y, 'udist':udist})
# create basemap
print('Generating plot...')
latStart = 41.15
latEnd = 43.15
lonStart =-71
lonEnd =-68
map = Basemap(projection='merc', lat_0 = 42, lon_0 = -70,resolution = 'h', area_thresh = 0.1,llcrnrlon=lonStart, llcrnrlat=latStart,
urcrnrlon=lonEnd, urcrnrlat=latEnd)
map.fillcontinents(color = 'green')
#-- plot mpt
mptlon, mptlat = my_project(mpt_x, mpt_y, 'inverse')
mptx, mpty = map(mptlon, mptlat)
map.plot(mptx,mpty,'b-')
#plot release and recapture location
map.plot(mptx[0],mpty[0],'kx',label="Release")
recap_x, recap_y = map(recapture_lon, recapture_lat)
map.plot(recap_x, recap_y,'k^',markeredgecolor='k',label="Reported Recapture")
map.plot(mptx[-1],mpty[-1],'bv',markeredgecolor='b',label="Simulated Recapture")
#-- plot uncertainty distribution
lon_g, lat_g = my_project(X, Y, 'inverse')
map.pcolormesh(lon_g, lat_g,udist,cmap=plt.cm.cubehelix_r,latlon=True,shading='gouraud')
plt.legend(numpoints=1,prop={'size':16},loc='lower right')
plt.title(tagname+' gpu')
plt.savefig('track'+tagname+'_gpu.pdf', dpi=300, bbox_inches='tight')
| mit |
MadsJensen/RP_scripts | epoching_interupt_subject.py | 1 | 1251 | """Epoch a raw data set.
"""
import mne
import sys
import matplotlib
matplotlib.use('Agg')
from my_settings import *
subject = sys.argv[1]
# SETTINGS
tmin, tmax = -4, 1
raw = mne.io.Raw(save_folder + "%s_interupt_filtered_ica_mc_tsss-raw.fif" % (
subject))
events = mne.find_events(raw)
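# recode button presses (trigger 1) that immediately follow a cue (trigger 2)
# as cued presses (trigger 3); see event_id below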
for j in range(len(events)):
if events[:, 2][j] == 1 and events[:, 2][j - 1] == 2:
events[:, 2][j] = 3
event_id = {'press': 1, "cue": 2, "cued_press": 3}
# Setup for reading the raw data
picks = mne.pick_types(raw.info,
meg=True,
eeg=False,
stim=True,
eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw,
events,
event_id,
tmin,
tmax,
picks=picks,
baseline=(None, -3.5),
reject=reject_params,
preload=True)
epochs.drop_bad(reject=reject_params)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/%s_interupt_drop_log.png" % (subject))
# Save epochs
epochs.save(epochs_folder + "%s_interupt-epo.fif" % (subject))
| bsd-3-clause |
kambysese/mne-python | mne/tests/test_label.py | 8 | 41281 | from itertools import product
import glob
import os
import os.path as op
import pickle
import shutil
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal)
import pytest
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_adjacency,
read_surface, random_parcellation, morph_labels,
labels_to_stc)
from mne.label import (Label, _blend_colors, label_sign_flip, _load_vert_pos,
select_sources)
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, check_version)
from mne.label import _n_colors, _read_annot, _read_annot_cands
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compat and to keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
        ordered in decreasing order of the maximum value in the stc.
        If no Label is available in a hemisphere, an empty list is returned.
"""
src = stc.subject if src is None else src
if isinstance(src, str):
subject = src
else:
subject = stc.subject
if isinstance(src, str):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
"""Assert two labels are equal."""
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_copy():
"""Test label copying."""
label = read_label(label_fname)
label_2 = label.copy()
label_2.pos += 1
assert_array_equal(label.pos, label_2.pos - 1)
def test_label_subject():
"""Test label subject name extraction."""
label = read_label(label_fname)
assert label.subject is None
assert ('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert (label.subject == 'fsaverage')
assert ('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition."""
pos = np.random.RandomState(0).rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
pytest.raises(ValueError, l_good.__add__, l_bad)
pytest.raises(TypeError, l_good.__add__, 'foo')
pytest.raises(ValueError, l_good.__sub__, l_bad)
pytest.raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
# adding overlapping labels
l02 = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l02.vertices == 6)[0][0]
assert_equal(l02.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l02.values[0], l0.values[0])
assert_array_equal(np.unique(l02.vertices), np.unique(idx0 + idx2))
assert_equal(l02.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l02.color)
assert ('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
pytest.raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
@pytest.mark.parametrize('fname', (real_label_fname, v1_label_fname))
def test_label_fill_restrict(fname):
"""Test label in fill and restrict."""
src = read_source_spaces(src_fname)
label = read_label(fname)
# construct label from source space vertices
label_src = label.restrict(src)
vert_in_src = label_src.vertices
values_in_src = label_src.values
if check_version('scipy', '1.3') and fname == real_label_fname:
# Check that we can auto-fill patch info quickly for one condition
for s in src:
s['nearest'] = None
with pytest.warns(None):
label_src = label_src.fill(src)
else:
label_src = label_src.fill(src)
assert src[0]['nearest'] is not None
# check label vertices
vertices_status = np.in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
with pytest.raises(ValueError, match='does not contain all of the label'):
Label(vertices, hemi='lh').fill(src)
# test filling empty label
label = Label([], hemi='lh')
label.fill(src)
assert_array_equal(label.vertices, np.array([], int))
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files."""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert (len(stc_label.times) == stc_label.data.shape[1])
assert (len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files."""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert label.subject is None
assert label.color is None
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
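# Unlike assert_labels_equal above, the helper below only compares vertices,
# name and hemi (and optionally positions) when checking whole parcellations.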
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Ensure two sets of labels are equal."""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert (label_a.name == label_b.name)
assert (label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files."""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
with pytest.raises(IOError, match='\nPALS_B12_Lobes$'):
read_labels_from_annot(subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [label for label in labels if label.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [label for label in parc1 if not label.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for lt, rt in zip(parc1, parc):
assert_labels_equal(lt, rt)
# test saving only one hemisphere
parc = [label for label in labels if label.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert os.path.isfile(annot_fname % 'l')
assert not os.path.isfile(annot_fname % 'r')
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [label for label in parc if label.name.endswith('lh')]
for lt, rt in zip(parc1, parc_lh):
assert_labels_equal(lt, rt)
# test that the annotation is complete (test Label() support)
rr = read_surface(op.join(surf_dir, 'lh.white'))[0]
label = sum(labels, Label(hemi='lh', subject='fsaverage')).lh
assert_array_equal(label.vertices, np.arange(len(rr)))
@testing.requires_testing_data
def test_morph_labels():
"""Test morph_labels."""
# Just process the first 5 labels for speed
parc_fsaverage = read_labels_from_annot(
'fsaverage', 'aparc', subjects_dir=subjects_dir)[:5]
parc_sample = read_labels_from_annot(
'sample', 'aparc', subjects_dir=subjects_dir)[:5]
parc_fssamp = morph_labels(
parc_fsaverage, 'sample', subjects_dir=subjects_dir)
for lf, ls, lfs in zip(parc_fsaverage, parc_sample, parc_fssamp):
assert lf.hemi == ls.hemi == lfs.hemi
assert lf.name == ls.name == lfs.name
perc_1 = np.in1d(lfs.vertices, ls.vertices).mean() * 100
perc_2 = np.in1d(ls.vertices, lfs.vertices).mean() * 100
# Ideally this would be 100%, but we do not use the same algorithm
# as FreeSurfer ...
assert perc_1 > 92
assert perc_2 > 88
with pytest.raises(ValueError, match='wrong and fsaverage'):
morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir,
subject_from='wrong')
with pytest.raises(RuntimeError, match='Number of surface vertices'):
_load_vert_pos('sample', subjects_dir, 'white', 'lh', 1)
for label in parc_fsaverage:
label.subject = None
with pytest.raises(ValueError, match='subject_from must be provided'):
morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_labels_to_stc():
"""Test labels_to_stc."""
labels = read_labels_from_annot(
'sample', 'aparc', subjects_dir=subjects_dir)
values = np.random.RandomState(0).randn(len(labels))
with pytest.raises(ValueError, match='1 or 2 dim'):
labels_to_stc(labels, values[:, np.newaxis, np.newaxis])
with pytest.raises(ValueError, match=r'values\.shape'):
labels_to_stc(labels, values[np.newaxis])
with pytest.raises(ValueError, match='multiple values of subject'):
labels_to_stc(labels, values, subject='foo')
stc = labels_to_stc(labels, values)
assert stc.subject == 'sample'
for value, label in zip(values, labels):
stc_label = stc.in_label(label)
assert (stc_label.data == value).all()
stc = read_source_estimate(stc_fname, 'sample')
@testing.requires_testing_data
def test_read_labels_from_annot(tmpdir):
"""Test reading labels from FreeSurfer parcellation."""
# test some invalid inputs
pytest.raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
pytest.raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
with pytest.raises(IOError, match='does not exist'):
_read_annot_cands('foo')
with pytest.raises(IOError, match='no candidate'):
_read_annot(str(tmpdir))
# read labels using hemi specification
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert label.name.endswith('-lh')
assert label.hemi == 'lh'
assert label.color is not None
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
for label in labels_rh:
assert label.name.endswith('-rh')
assert label.hemi == 'rh'
assert label.color is not None
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert (len(labels_both) == 68)
# test regexp
label = read_labels_from_annot('sample', parc='aparc.a2009s',
regexp='Angu', subjects_dir=subjects_dir)[0]
assert (label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = read_labels_from_annot('sample', 'aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0]
assert (label.name == 'G_oc-temp_med-Lingual-lh')
pytest.raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels."""
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels_mne = [read_label(fname) for fname in label_fnames]
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
"""Test writing FreeSurfer parcellation from labels."""
tempdir = _TempDir()
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# create temporary subjects-dir skeleton
surf_dir = op.join(subjects_dir, 'sample', 'surf')
temp_surf_dir = op.join(tempdir, 'sample', 'surf')
os.makedirs(temp_surf_dir)
shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
os.makedirs(op.join(tempdir, 'sample', 'label'))
# test automatic filenames
dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test1')))
assert (op.exists(dst % ('rh', 'test1')))
# lh only
for label in labels:
if label.hemi == 'lh':
break
write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test2')))
assert (op.exists(dst % ('rh', 'test2')))
# rh only
for label in labels:
if label.hemi == 'rh':
break
write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
assert (op.exists(dst % ('lh', 'test3')))
assert (op.exists(dst % ('rh', 'test3')))
# label alone
pytest.raises(TypeError, write_labels_to_annot, labels[0], 'sample',
'test4', subjects_dir=tempdir)
# write left and right hemi labels with filenames:
fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
for fname in fnames:
with pytest.warns(RuntimeWarning, match='subjects_dir'):
write_labels_to_annot(labels, annot_fname=fname)
# read it back
labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
names = [label.name for label in labels2]
for label in labels:
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
# same with label-internal colors
for fname in fnames:
write_labels_to_annot(labels, 'sample', annot_fname=fname,
overwrite=True, subjects_dir=subjects_dir)
labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels3.extend(labels33)
names3 = [label.name for label in labels3]
for label in labels:
idx = names3.index(label.name)
assert_labels_equal(label, labels3[idx])
# make sure we can't overwrite things
pytest.raises(ValueError, write_labels_to_annot, labels, 'sample',
annot_fname=fnames[0], subjects_dir=subjects_dir)
# however, this works
write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# label without color
labels_ = labels[:]
labels_[0] = labels_[0].copy()
labels_[0].color = None
write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# duplicate color
labels_[0].color = labels_[2].color
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# invalid color inputs
labels_[0].color = (1.1, 1., 1., 1.)
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# overlapping labels
labels_ = labels[:]
cuneus_lh = labels[6]
precuneus_lh = labels[50]
labels_.append(precuneus_lh + cuneus_lh)
pytest.raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# unlabeled vertices
labels_lh = [label for label in labels if label.name.endswith('lh')]
write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
subjects_dir=subjects_dir)
assert_equal(len(labels_lh), len(labels_reloaded))
label0 = labels_lh[0]
label1 = labels_reloaded[-1]
assert_equal(label1.name, "unknown-lh")
assert (np.all(np.in1d(label0.vertices, label1.vertices)))
# unnamed labels
labels4 = labels[:]
labels4[0].name = None
pytest.raises(ValueError, write_labels_to_annot, labels4,
annot_fname=fnames[0])
@requires_sklearn
@testing.requires_testing_data
def test_split_label():
"""Test splitting labels."""
aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
regexp='lingual', subjects_dir=subjects_dir)
lingual = aparc[0]
# Test input error
pytest.raises(ValueError, lingual.split, 'bad_input_string')
# split with names
parts = ('lingual_post', 'lingual_ant')
post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
# check output names
assert_equal(post.name, parts[0])
assert_equal(ant.name, parts[1])
# check vertices add up
lingual_reconst = post + ant
lingual_reconst.name = lingual.name
lingual_reconst.comment = lingual.comment
lingual_reconst.color = lingual.color
assert_labels_equal(lingual_reconst, lingual)
# compare output of Label.split() method
post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
assert_labels_equal(post1, post)
assert_labels_equal(ant1, ant)
# compare fs_like split with freesurfer split
antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
assert_array_equal(antmost.vertices, fs_vert)
# check default label name
assert_equal(antmost.name, "lingual_div40-lh")
# Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
'lh.7Networks_7.label'))
DMN_sublabels = label_default_mode.split(parts='contiguous',
subject='fsaverage',
subjects_dir=subjects_dir)
assert_equal([len(label.vertices) for label in DMN_sublabels],
[16181, 7022, 5965, 5300, 823] + [1] * 23)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label."""
src = read_source_spaces(fwd_fname)
src_bad = read_source_spaces(src_bad_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = _stc_to_label(stc, src='sample', smooth=3)
labels2 = _stc_to_label(stc, src=src, smooth=3)
assert_equal(len(labels1), len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with pytest.warns(RuntimeWarning, match='have holes'):
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
connected=True)
pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
connected=True)
pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
connected=True)
assert_equal(len(labels_lh), 1)
assert_equal(len(labels_rh), 1)
# test getting tris
tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
pytest.raises(ValueError, spatial_tris_adjacency, tris,
remap_vertices=False)
adjacency = spatial_tris_adjacency(tris, remap_vertices=True)
assert (adjacency.shape[0] == len(stc.vertices[0]))
# "src" as a subject name
pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
connected=False, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
smooth=False, connected=False, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
connected=True, subjects_dir=subjects_dir)
pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
connected=False, subjects_dir=subjects_dir)
labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
connected=False,
subjects_dir=subjects_dir)
assert (len(labels_lh) > 1)
assert (len(labels_rh) > 1)
# with smooth='patch'
with pytest.warns(RuntimeWarning, match='have holes'):
labels_patch = stc_to_label(stc, src=src, smooth=True)
assert len(labels_patch) == len(labels1)
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph():
"""Test inter-subject label morphing."""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
label = label_orig.copy()
# this should throw an error because the label has all zero values
pytest.raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
assert (np.in1d(label_orig.vertices, label.vertices).all())
assert (len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
assert_equal(label.subject, 'sample')
verts = [np.arange(10242), np.arange(10242)]
for hemi in ['lh', 'rh']:
label.hemi = hemi
with pytest.warns(None): # morph map maybe missing
label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
pytest.raises(TypeError, label.morph, None, 1, 5, verts,
subjects_dir, 2)
pytest.raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
subjects_dir, 2)
with pytest.warns(None): # morph map maybe missing
label.smooth(subjects_dir=subjects_dir) # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
"""Test generation of circular source labels."""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
names = ['aneurism', 'tumor']
labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
tgt_names = ['aneurism-lh', 'tumor-rh']
tgt_hemis = ['lh', 'rh']
for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
should_be_in, tgt_names):
assert (np.any(label.vertices == seed))
assert (np.all(np.in1d(sh, label.vertices)))
assert_equal(label.hemi, hemi)
assert_equal(label.name, name)
# grow labels with and without overlap
seeds = [57532, [58887, 6304]]
l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
seeds = [57532, [58887, 6304]]
l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False)
# test label naming
assert_equal(l01.name, 'Label_0-lh')
assert_equal(l02.name, 'Label_1-lh')
assert_equal(l11.name, 'Label_0-lh')
assert_equal(l12.name, 'Label_1-lh')
# test color assignment
l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False, colors=None)
assert_equal(l11_c.color, _n_colors(2)[0])
assert_equal(l12_c.color, _n_colors(2)[1])
lab_colors = np.array([[0, 0, 1, 1], [1, 0, 0, 1]])
l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False, colors=lab_colors)
assert_array_equal(l11_c.color, lab_colors[0, :])
assert_array_equal(l12_c.color, lab_colors[1, :])
lab_colors = np.array([.1, .2, .3, .9])
l11_c, l12_c = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False, colors=lab_colors)
assert_array_equal(l11_c.color, lab_colors)
assert_array_equal(l12_c.color, lab_colors)
# make sure set 1 does not overlap
overlap = np.intersect1d(l11.vertices, l12.vertices, True)
assert_array_equal(overlap, [])
# make sure both sets cover the same vertices
l0 = l01 + l02
l1 = l11 + l12
assert_array_equal(l1.vertices, l0.vertices)
@testing.requires_testing_data
def test_random_parcellation():
"""Test generation of random cortical parcellation."""
hemi = 'both'
n_parcel = 50
surface = 'sphere.reg'
subject = 'sample_ds'
rng = np.random.RandomState(0)
# Parcellation
labels = random_parcellation(subject, n_parcel, hemi, subjects_dir,
surface=surface, random_state=rng)
# test number of labels
assert_equal(len(labels), n_parcel)
if hemi == 'both':
hemi = ['lh', 'rh']
hemis = np.atleast_1d(hemi)
for hemi in set(hemis):
vertices_total = []
for label in labels:
if label.hemi == hemi:
# test that labels are not empty
assert (len(label.vertices) > 0)
# vertices of hemi covered by labels
vertices_total = np.append(vertices_total, label.vertices)
# test that labels don't intersect
assert_equal(len(np.unique(vertices_total)), len(vertices_total))
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert, _ = read_surface(surf_fname)
# Test that labels cover whole surface
assert_array_equal(np.sort(vertices_total), np.arange(len(vert)))
@testing.requires_testing_data
def test_label_sign_flip():
"""Test label sign flip computation."""
src = read_source_spaces(src_fname)
label = Label(vertices=src[0]['vertno'][:5], hemi='lh')
src[0]['nn'][label.vertices] = np.array(
[[1., 0., 0.],
[0., 1., 0.],
[0, 0, 1.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.]])
known_flips = np.array([1, 1, np.nan, 1, 1])
    idx = [0, 1, 3, 4]  # indices that are usable (third row is orthogonal)
flip = label_sign_flip(label, src)
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
bi_label = label + Label(vertices=src[1]['vertno'][:5], hemi='rh')
src[1]['nn'][src[1]['vertno'][:5]] = -src[0]['nn'][label.vertices]
flip = label_sign_flip(bi_label, src)
known_flips = np.array([1, 1, np.nan, 1, 1, 1, 1, np.nan, 1, 1])
idx = [0, 1, 3, 4, 5, 6, 8, 9]
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), 0.)
src[1]['nn'][src[1]['vertno'][:5]] *= -1
flip = label_sign_flip(bi_label, src)
assert_array_almost_equal(np.dot(flip[idx], known_flips[idx]), len(idx))
@testing.requires_testing_data
def test_label_center_of_mass():
"""Test computing the center of mass of a label."""
stc = read_source_estimate(stc_fname)
stc.lh_data[:] = 0
vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
assert_equal(vertex_stc, 124791)
label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
hemi='rh', subject='sample')
vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
assert_equal(vertex_label, vertex_stc)
labels = read_labels_from_annot('sample', parc='aparc.a2009s',
subjects_dir=subjects_dir)
src = read_source_spaces(src_fname)
# Try a couple of random ones, one from left and one from right
# Visually verified in about the right place using mne_analyze
for label, expected in zip([labels[2], labels[3], labels[-5]],
[141162, 145221, 55979]):
label.values[:] = -1
pytest.raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 0
pytest.raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 1
assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=label.vertices),
expected)
# restrict to source space
idx = 0 if label.hemi == 'lh' else 1
# this simple nearest version is not equivalent, but is probably
# close enough for many labels (including the test ones):
pos = label.pos[np.where(label.vertices == expected)[0][0]]
pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
pos = np.argmin(np.sum(pos * pos, axis=1))
src_expected = src[idx]['vertno'][pos]
# see if we actually get the same one
src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src_restrict),
src_expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src),
src_expected)
# degenerate cases
pytest.raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
restrict_vertices='foo')
pytest.raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
surf=1)
pytest.raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
surf='foo')
run_tests_if_main()
@testing.requires_testing_data
def test_select_sources():
"""Test the selection of sources for simulation."""
subject = 'sample'
label_file = op.join(subjects_dir, subject, 'label', 'aparc',
'temporalpole-rh.label')
# Regardless of other parameters, using extent 0 should always yield a
# a single source.
tp_label = read_label(label_file)
tp_label.values[:] = 1
labels = ['lh', tp_label]
locations = ['random', 'center']
for label, location in product(labels, locations):
label = select_sources(
subject, label, location, extent=0, subjects_dir=subjects_dir)
assert (len(label.vertices) == 1)
# As we increase the extent, the new region should contain the previous
# one.
label = select_sources(subject, 'lh', 0, extent=0,
subjects_dir=subjects_dir)
for extent in range(1, 3):
new_label = select_sources(subject, 'lh', 0, extent=extent * 2,
subjects_dir=subjects_dir)
assert (set(new_label.vertices) > set(label.vertices))
assert (new_label.hemi == 'lh')
label = new_label
# With a large enough extent and not allowing growing outside the label,
# every vertex of the label should be in the region.
label = select_sources(subject, tp_label, 0, extent=30,
grow_outside=False, subjects_dir=subjects_dir)
assert (set(label.vertices) == set(tp_label.vertices))
# Without this restriction, we should get new vertices.
label = select_sources(subject, tp_label, 0, extent=30,
grow_outside=True, subjects_dir=subjects_dir)
assert (set(label.vertices) > set(tp_label.vertices))
# Other parameters are taken into account.
label = select_sources(subject, tp_label, 0, extent=10,
grow_outside=False, subjects_dir=subjects_dir,
name='mne')
assert (label.name == 'mne')
assert (label.hemi == 'rh')
| bsd-3-clause |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cairo.py | 69 | 16706 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
  * variables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as npy
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
def set_ctx_from_surface (self, surface):
self.ctx = cairo.Context (surface)
self.ctx.save() # restore, save - when call new_gc()
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
#@staticmethod
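    # Replays a matplotlib Path on a cairo context: each (points, code) segment is
    # mapped to the matching cairo operator (move_to, line_to, curve_to, close_path);
    # quadratic CURVE3 segments are emitted as cubic curve_to calls with the control
    # point repeated.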
def convert_path(ctx, tpath):
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
tpath = transform.transform_path(path)
ctx.new_path()
self.convert_path(ctx, tpath)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.ctx
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * npy.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * npy.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.ctx.restore() # matches save() in set_ctx_from_surface()
self.ctx.save()
return GraphicsContextCairo (renderer=self)
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.ctx = renderer.ctx
def set_alpha(self, alpha):
self._alpha = alpha
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
self._cliprect = rectangle
if rectangle is None:
return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
# Alternative: just set _cliprect here and actually set cairo clip rect
# in fill_and_stroke() inside ctx.save() ... ctx.restore()
def set_clip_path(self, path):
if path is not None:
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
tpath = affine.transform_path(tpath)
RendererCairo.convert_path(ctx, tpath)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (npy.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
    if _debug: print 'backend_cairo.%s()' % (_fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.ctx
if orientation == 'landscape':
ctx.rotate (npy.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
| agpl-3.0 |
hlin117/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
xuwei401/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO  # used in plot_predictions() for in-memory PNG buffers; imported explicitly in case python_util.util does not re-export it
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
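# Illustrative sketch (not part of the original script): typical ways to invoke
# this viewer on a checkpoint saved by convnet.py. The checkpoint path and the
# layer/cost names below are hypothetical, and the exact flag syntax is
# whatever python_util.options expects (shown here in "--name value" form).
# Showing predictions additionally needs the data-path and test-range options
# retained from the ConvNet parser above.
#
#   python shownet.py --load-file /path/to/ConvNet__checkpoint --show-cost logprob
#   python shownet.py --load-file /path/to/ConvNet__checkpoint --show-filters conv1
#   python shownet.py --load-file /path/to/ConvNet__checkpoint --show-preds probs --gpu 0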
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
amarquand/nispat | nispat/utils.py | 1 | 26418 | from __future__ import print_function
import os
import numpy as np
from scipy import stats
from subprocess import call
from scipy.stats import genextreme, norm
from six import with_metaclass
from abc import ABCMeta, abstractmethod
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import bspline
from bspline import splinelab
from sklearn.datasets import make_regression
import pymc3 as pm
# -----------------
# Utility functions
# -----------------
def create_poly_basis(X, dimpoly):
""" compute a polynomial basis expansion of the specified order"""
if len(X.shape) == 1:
X = X[:, np.newaxis]
D = X.shape[1]
Phi = np.zeros((X.shape[0], D*dimpoly))
colid = np.arange(0, D)
for d in range(1, dimpoly+1):
Phi[:, colid] = X ** d
colid += D
return Phi
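# Illustrative sketch (not part of the original module): a cubic expansion of a
# single covariate. Column order follows the loop above: all first-order
# columns first, then second-order, then third-order.
def _example_create_poly_basis():
    X = np.linspace(0, 1, 5)
    Phi = create_poly_basis(X, 3)        # columns are x, x**2, x**3
    assert Phi.shape == (5, 3)
    return Phi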
def create_bspline_basis(xmin, xmax, p = 3, nknots = 5):
""" compute a Bspline basis set where:
:param p: order of spline (3 = cubic)
:param nknots: number of knots (endpoints only counted once)
"""
knots = np.linspace(xmin, xmax, nknots)
k = splinelab.augknt(knots, p) # pad the knot vector
B = bspline.Bspline(k, p)
return B
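# Illustrative sketch (not part of the original module): building a cubic
# B-spline design matrix by evaluating the basis at each covariate value.
# It is assumed here that the bspline package's Bspline object is callable at
# a scalar x and returns the vector of basis-function values at x.
def _example_create_bspline_basis():
    B = create_bspline_basis(0.0, 1.0, p=3, nknots=5)
    x = np.linspace(0.0, 1.0, 10)
    Phi = np.array([B(xi) for xi in x])  # one row of basis values per sample
    return Phi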
def squared_dist(x, z=None):
""" compute sum((x-z) ** 2) for all vectors in a 2d array"""
# do some basic checks
if z is None:
z = x
if len(x.shape) == 1:
x = x[:, np.newaxis]
if len(z.shape) == 1:
z = z[:, np.newaxis]
nx, dx = x.shape
nz, dz = z.shape
if dx != dz:
raise ValueError("""
Cannot compute distance: vectors have different length""")
# mean centre for numerical stability
m = np.mean(np.vstack((np.mean(x, axis=0), np.mean(z, axis=0))), axis=0)
x = x - m
z = z - m
xx = np.tile(np.sum((x*x), axis=1)[:, np.newaxis], (1, nz))
zz = np.tile(np.sum((z*z), axis=1), (nx, 1))
dist = (xx - 2*x.dot(z.T) + zz)
return dist
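# Illustrative sketch (not part of the original module): squared_dist returns
# all pairwise squared Euclidean distances, so its diagonal is zero and it
# agrees with scipy's cdist(..., 'sqeuclidean') up to numerical error.
def _example_squared_dist():
    from scipy.spatial.distance import cdist
    x = np.random.randn(10, 3)
    D = squared_dist(x)
    assert np.allclose(D, cdist(x, x, 'sqeuclidean'), atol=1e-8)
    return D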
def compute_pearsonr(A, B):
""" Manually computes the Pearson correlation between two matrices.
Basic usage::
compute_pearsonr(A, B)
where
:param A: an N * M data array
        :param B: an N * M data array
        :returns Rho: M dimensional vector of correlation coefficients
        :returns pRho: M dimensional vector of two-tailed p-values
Notes::
This function is useful when M is large and only the diagonal entries
of the resulting correlation matrix are of interest. This function
does not compute the full correlation matrix as an intermediate step"""
# N = A.shape[1]
N = A.shape[0]
# first mean centre
Am = A - np.mean(A, axis=0)
Bm = B - np.mean(B, axis=0)
# then normalize
An = Am / np.sqrt(np.sum(Am**2, axis=0))
Bn = Bm / np.sqrt(np.sum(Bm**2, axis=0))
del(Am, Bm)
Rho = np.sum(An * Bn, axis=0)
del(An, Bn)
# Fisher r-to-z
Zr = (np.arctanh(Rho) - np.arctanh(0)) * np.sqrt(N - 3)
N = stats.norm()
pRho = 2*N.cdf(-np.abs(Zr))
# pRho = 1-N.cdf(Zr)
return Rho, pRho
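# Illustrative sketch (not part of the original module): the column-wise
# correlations agree with scipy.stats.pearsonr applied to a single column pair
# (the p-values differ slightly because a Fisher z-test is used above).
def _example_compute_pearsonr():
    A = np.random.randn(50, 4)
    B = A + 0.5 * np.random.randn(50, 4)
    Rho, pRho = compute_pearsonr(A, B)
    r0 = stats.pearsonr(A[:, 0], B[:, 0])[0]
    assert np.allclose(Rho[0], r0)
    return Rho, pRho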
def explained_var(ytrue, ypred):
""" Computes the explained variance of predicted values.
Basic usage::
exp_var = explained_var(ytrue, ypred)
where
:ytrue: n*p matrix of true values where n is the number of samples
and p is the number of features.
:ypred: n*p matrix of predicted values where n is the number of samples
and p is the number of features.
    :returns exp_var: p dimensional vector of explained variances for each feature.
"""
exp_var = 1 - (ytrue - ypred).var(axis = 0) / ytrue.var(axis = 0)
return exp_var
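# Illustrative sketch (not part of the original module): a perfect prediction
# gives an explained variance of exactly 1 for every feature, while adding
# independent noise pulls it towards 0.
def _example_explained_var():
    y = np.random.randn(200, 3)
    assert np.allclose(explained_var(y, y), 1.0)
    return explained_var(y, y + np.random.randn(200, 3))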
def compute_MSLL(ytrue, ypred, ypred_var, train_mean = None, train_var = None):
""" Computes the MSLL or MLL (not standardized) if 'train_mean' and 'train_var' are None.
Basic usage::
MSLL = compute_MSLL(ytrue, ypred, ytrue_sig, noise_variance, train_mean, train_var)
where
:ytrue : n*p matrix of true values where n is the number of samples
and p is the number of features.
:ypred : n*p matrix of predicted values where n is the number of samples
and p is the number of features.
:ypred_var : n*p matrix of summed noise variances and prediction variances where n is the number of samples
and p is the number of features.
:train_mean : p dimensional vector of mean values of the training data for each feature.
    :train_var : p dimensional vector of variances of the training data for each feature.
:returns loss : p dimensional vector of MSLL or MLL for each feature.
"""
if train_mean is not None and train_var is not None:
# make sure y_train_mean and y_train_sig have right dimensions (subjects x voxels):
Y_train_mean = np.repeat(train_mean, ytrue.shape[0], axis = 0)
Y_train_sig = np.repeat(train_var, ytrue.shape[0], axis = 0)
# compute MSLL:
loss = np.mean(0.5 * np.log(2 * np.pi * ypred_var) + (ytrue - ypred)**2 / (2 * ypred_var) -
0.5 * np.log(2 * np.pi * Y_train_sig) - (ytrue - Y_train_mean)**2 / (2 * Y_train_sig), axis = 0)
else:
# compute MLL:
loss = np.mean(0.5 * np.log(2 * np.pi * ypred_var) + (ytrue - ypred)**2 / (2 * ypred_var), axis = 0)
return loss
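# Illustrative sketch (not part of the original module): a trivial predictor
# that always outputs the training mean and variance has a standardized MSLL
# of exactly zero by construction, which is the reference point of the metric.
def _example_compute_MSLL():
    rng = np.random.RandomState(0)
    y_tr = rng.randn(200, 2)
    y_te = rng.randn(100, 2)
    mu = np.tile(y_tr.mean(axis=0), (y_te.shape[0], 1))
    s2 = np.tile(y_tr.var(axis=0), (y_te.shape[0], 1))
    msll = compute_MSLL(y_te, mu, s2,
                        train_mean=y_tr.mean(axis=0, keepdims=True),
                        train_var=y_tr.var(axis=0, keepdims=True))
    return msll                          # array of zeros (one entry per feature)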
class WarpBase(with_metaclass(ABCMeta)):
""" Base class for likelihood warping following:
Rios and Torab (2019) Compositionally-warped Gaussian processes
https://www.sciencedirect.com/science/article/pii/S0893608019301856
All Warps must define the following methods::
Warp.get_n_params() - return number of parameters
Warp.f() - warping function (Non-Gaussian field -> Gaussian)
Warp.invf() - inverse warp
Warp.df() - derivatives
Warp.warp_predictions() - compute predictive distribution
"""
def __init__(self):
self.n_params = np.nan
def get_n_params(self):
""" Report the number of parameters required """
assert not np.isnan(self.n_params), \
"Warp function not initialised"
return self.n_params
def warp_predictions(self, mu, s2, param, percentiles=[0.025, 0.975]):
""" Compute the warped predictions from a gaussian predictive
distribution, specifed by a mean (mu) and variance (s2)
:param mu: Gassian predictive mean
:param s2: Predictive variance
:param param: warping parameters
:param percentiles: Desired percentiles of the warped likelihood
:returns: * median - median of the predictive distribution
* pred_interval - predictive interval(s)
"""
# Compute percentiles of a standard Gaussian
N = norm
Z = N.ppf(percentiles)
# find the median (using mu = median)
median = self.invf(mu, param)
# compute the predictive intervals (non-stationary)
pred_interval = np.zeros((len(mu), len(Z)))
for i, z in enumerate(Z):
pred_interval[:,i] = self.invf(mu + np.sqrt(s2)*z, param)
return median, pred_interval
@abstractmethod
def f(self, x, param):
""" Evaluate the warping function (mapping non-Gaussian respone
variables to Gaussian variables)"""
@abstractmethod
def invf(self, y, param):
""" Evaluate the warping function (mapping Gaussian latent variables
to non-Gaussian response variables) """
@abstractmethod
def df(self, x, param):
""" Return the derivative of the warp, dw(x)/dx """
class WarpAffine(WarpBase):
""" Affine warp
y = a + b*x
"""
def __init__(self):
self.n_params = 2
def _get_params(self, param):
if len(param) != self.n_params:
raise(ValueError,
'number of parameters must be ' + str(self.n_params))
return param[0], param[1]
def f(self, x, params):
a, b = self._get_params(params)
y = a + b*x
return y
def invf(self, y, params):
a, b = self._get_params(params)
x = (y - a) / b
return x
def df(self, x, params):
a, b = self._get_params(params)
df = np.ones(x.shape)*b
return df
class WarpBoxCox(WarpBase):
""" Box cox transform having a single parameter (lambda), i.e.
        y = (sign(x) * abs(x) ** lambda - 1) / lambda
    This follows the generalization in Bickel and Doksum (1981) JASA 76
and allows x to assume negative values.
"""
def __init__(self):
self.n_params = 1
def _get_params(self, param):
return np.exp(param)
def f(self, x, params):
lam = self._get_params(params)
if lam == 0:
y = np.log(x)
else:
y = (np.sign(x) * np.abs(x) ** lam - 1) / lam
return y
def invf(self, y, params):
lam = self._get_params(params)
if lam == 0:
x = np.exp(y)
else:
x = np.sign(lam * y + 1) * np.abs(lam * y + 1) ** (1 / lam)
return x
def df(self, x, params):
lam = self._get_params(params)
dx = np.abs(x) ** (lam - 1)
return dx
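# Illustrative sketch (not part of the original module): the warp is invertible,
# so invf(f(x)) recovers x. Note that WarpBoxCox exponentiates its parameter,
# so param = [0.0] corresponds to lambda = 1.
def _example_warp_boxcox_roundtrip():
    w = WarpBoxCox()
    param = [0.0]
    x = np.linspace(-3.0, 3.0, 7)
    y = w.f(x, param)
    assert np.allclose(w.invf(y, param), x)
    return y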
class WarpSinArcsinh(WarpBase):
""" Sin-hyperbolic arcsin warp having two parameters (a, b) and defined by
y = sinh(b * arcsinh(x) - a)
see Jones and Pewsey A (2009) Biometrika, 96 (4) (2009)
"""
def __init__(self):
self.n_params = 2
def _get_params(self, param):
if len(param) != self.n_params:
raise(ValueError,
'number of parameters must be ' + str(self.n_params))
return param[0], param[1]
def f(self, x, params):
a, b = self._get_params(params)
y = np.sinh(b * np.arcsinh(x) - a)
return y
def invf(self, y, params):
a, b = self._get_params(params)
x = np.sinh((np.arcsinh(y)+a)/b)
return x
def df(self, x, params):
a, b = self._get_params(params)
dx = (b *np.cosh(b * np.arcsinh(x) - a))/np.sqrt(1 + x ** 2)
return dx
class WarpCompose(WarpBase):
""" Composition of warps. These are passed in as an array and
intialised automatically. For example::
W = WarpCompose(('WarpBoxCox', 'WarpAffine'))
where ell_i are lengthscale parameters and sf2 is the signal variance
"""
def __init__(self, warpnames=None):
if warpnames is None:
raise ValueError("A list of warp functions is required")
self.warps = []
self.n_params = 0
for wname in warpnames:
warp = eval(wname + '()')
self.n_params += warp.get_n_params()
self.warps.append(warp)
def f(self, x, theta):
theta_offset = 0
for ci, warp in enumerate(self.warps):
n_params_c = warp.get_n_params()
theta_c = [theta[c] for c in
range(theta_offset, theta_offset + n_params_c)]
theta_offset += n_params_c
if ci == 0:
fw = warp.f(x, theta_c)
else:
fw = warp.f(fw, theta_c)
return fw
    def invf(self, x, theta):
        # apply the component inverses in reverse order so that invf is the
        # true inverse of f for non-commuting warps; each warp receives the
        # same slice of theta that it consumes in f()
        bounds = []
        theta_offset = 0
        for warp in self.warps:
            n_params_c = warp.get_n_params()
            bounds.append((theta_offset, theta_offset + n_params_c))
            theta_offset += n_params_c
        finvw = x
        for warp, (lo, hi) in zip(reversed(self.warps), reversed(bounds)):
            theta_c = [theta[c] for c in range(lo, hi)]
            finvw = warp.invf(finvw, theta_c)
        return finvw
def df(self, x, theta):
theta_offset = 0
for ci, warp in enumerate(self.warps):
n_params_c = warp.get_n_params()
theta_c = [theta[c] for c in
range(theta_offset, theta_offset + n_params_c)]
theta_offset += n_params_c
if ci == 0:
dfw = warp.df(x, theta_c)
else:
dfw = warp.df(dfw, theta_c)
return dfw
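# Illustrative sketch (not part of the original module): composing a Box-Cox
# warp with an affine warp. The composite parameter vector is the concatenation
# of the component parameters in the order the warps were listed: one (log)
# Box-Cox parameter followed by the two affine parameters.
def _example_warp_compose():
    W = WarpCompose(('WarpBoxCox', 'WarpAffine'))
    theta = [0.0, 1.0, 2.0]
    x = np.linspace(0.5, 3.0, 5)
    y = W.f(x, theta)
    x_rec = W.invf(y, theta)             # maps the warped values back to x
    return y, x_rec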
# -----------------------
# Functions for inference
# -----------------------
class CustomCV:
""" Custom cross-validation approach. This function does not do much, it
merely provides a wrapper designed to be compatible with
scikit-learn (e.g. sklearn.model_selection...)
:param train: a list of indices of training splits (each itself a list)
:param test: a list of indices of test splits (each itself a list)
:returns tr: Indices for training set
:returns te: Indices for test set """
def __init__(self, train, test, X=None, y=None):
self.train = train
self.test = test
self.n_splits = len(train)
if X is not None:
self.N = X.shape[0]
else:
self.N = None
def split(self, X, y=None):
if self.N is None:
self.N = X.shape[0]
for i in range(0, self.n_splits):
tr = self.train[i]
te = self.test[i]
yield tr, te
# -----------------------
# Functions for inference
# -----------------------
def bashwrap(processing_dir, python_path, script_command, job_name,
bash_environment=None):
""" This function wraps normative modelling into a bash script to run it
on a torque cluster system.
** Input:
        * processing_dir     -> Full path to the processing dir
        * python_path        -> Full path to the python distribution
        * script_command     -> python command to execute (including its
                                arguments)
        * job_name           -> Name for the bash script that is the output of
                                this function
        * bash_environment   -> A file containing the necessary commands
                                for your bash environment to work
** Output:
* A bash.sh file containing the commands for normative modelling saved
to the processing directory
    written by Thomas Wolfers
"""
# change to processing dir
os.chdir(processing_dir)
output_changedir = ['cd ' + processing_dir + '\n']
# sets bash environment if necessary
if bash_environment is not None:
bash_environment = [bash_environment]
print("""Your own environment requires in any case:
#!/bin/bash\n export and optionally OMP_NUM_THREADS=1\n""")
else:
bash_lines = '#!/bin/bash\n\n'
bash_cores = 'export OMP_NUM_THREADS=1\n'
bash_environment = [bash_lines + bash_cores]
command = [python_path + ' ' + script_command + '\n']
# writes bash file into processing dir
bash_file_name = os.path.join(processing_dir, job_name + '.sh')
with open(bash_file_name, 'w') as bash_file:
bash_file.writelines(bash_environment + output_changedir + command)
    # changes permissions for the bash.sh file
os.chmod(bash_file_name, 0o700)
return bash_file_name
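# Illustrative sketch (not part of the original module): wrapping an arbitrary
# python command into a submittable bash script. The paths and the command
# below are hypothetical placeholders and the processing directory must exist.
def _example_bashwrap():
    return bashwrap(processing_dir='/tmp/nm_job/',
                    python_path='/usr/bin/python',
                    script_command='my_analysis_script.py --input /tmp/data.txt',
                    job_name='nm_job')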
def qsub(job_path, memory, duration, logdir=None):
"""
    This function submits a job.sh script to the torque cluster using the qsub
command.
** Input:
* job_path -> Full path to the job.sh file
* memory -> Memory requirements written as string for example
4gb or 500mb
* duration -> The approximate duration of the job, a string with
HH:MM:SS for example 01:01:01
** Output:
* Submission of the job to the (torque) cluster
    written by Thomas Wolfers
"""
if logdir is None:
logdir = os.path.expanduser('~')
# created qsub command
qsub_call = ['echo ' + job_path + ' | qsub -N ' + job_path + ' -l ' +
'mem=' + memory + ',walltime=' + duration +
' -e ' + logdir + ' -o ' + logdir]
# submits job to cluster
call(qsub_call, shell=True)
def extreme_value_prob_fit(NPM, perc):
n = NPM.shape[0]
t = NPM.shape[1]
n_perc = int(round(t * perc))
m = np.zeros(n)
for i in range(n):
temp = np.abs(NPM[i, :])
temp = np.sort(temp)
temp = temp[t - n_perc:]
temp = temp[0:int(np.floor(0.90*temp.shape[0]))]
m[i] = np.mean(temp)
params = genextreme.fit(m)
return params
def extreme_value_prob(params, NPM, perc):
n = NPM.shape[0]
t = NPM.shape[1]
n_perc = int(round(t * perc))
m = np.zeros(n)
for i in range(n):
temp = np.abs(NPM[i, :])
temp = np.sort(temp)
temp = temp[t - n_perc:]
temp = temp[0:int(np.floor(0.90*temp.shape[0]))]
m[i] = np.mean(temp)
probs = genextreme.cdf(m,*params)
return probs
def ravel_2D(a):
s = a.shape
return np.reshape(a,[s[0], np.prod(s[1:])])
def unravel_2D(a, s):
return np.reshape(a,s)
def threshold_NPM(NPMs, fdr_thr=0.05, npm_thr=0.1):
""" Compute voxels with significant NPMs. """
p_values = stats.norm.cdf(-np.abs(NPMs))
results = np.zeros(NPMs.shape)
masks = np.full(NPMs.shape, False, dtype=bool)
for i in range(p_values.shape[0]):
masks[i,:] = FDR(p_values[i,:], fdr_thr)
results[i,] = NPMs[i,:] * masks[i,:].astype(np.int)
m = np.sum(masks,axis=0)/masks.shape[0] > npm_thr
#m = np.any(masks,axis=0)
return results, masks, m
def FDR(p_values, alpha):
""" Compute the false discovery rate in all voxels for a subject. """
dim = np.shape(p_values)
p_values = np.reshape(p_values,[np.prod(dim),])
sorted_p_values = np.sort(p_values)
sorted_p_values_idx = np.argsort(p_values);
testNum = len(p_values)
thresh = ((np.array(range(testNum)) + 1)/np.float(testNum)) * alpha
h = sorted_p_values <= thresh
unsort = np.argsort(sorted_p_values_idx)
h = h[unsort]
h = np.reshape(h, dim)
return h
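# Illustrative sketch (not part of the original module): with uniformly
# distributed p-values (i.e. no true effects) the Benjamini-Hochberg procedure
# rejects essentially nothing at alpha = 0.05.
def _example_FDR():
    rng = np.random.RandomState(0)
    p = rng.uniform(size=(10, 100))
    h = FDR(p, alpha=0.05)               # boolean mask with the original shape
    return int(h.sum())                  # expected to be very small, often 0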
def calibration_error(Y,m,s,cal_levels):
ce = 0
for cl in cal_levels:
z = np.abs(norm.ppf((1-cl)/2))
ub = m + z * s
lb = m - z * s
ce = ce + np.abs(cl - np.sum(np.logical_and(Y>=lb,Y<=ub))/Y.shape[0])
return ce
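# Illustrative sketch (not part of the original module): for data that really is
# Gaussian with the stated means and standard deviations, the summed absolute
# calibration error over the requested levels is close to zero.
def _example_calibration_error():
    rng = np.random.RandomState(0)
    y = rng.randn(10000)
    m = np.zeros_like(y)
    s = np.ones_like(y)
    return calibration_error(y, m, s, cal_levels=[0.5, 0.8, 0.95])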
def simulate_data(method='linear', n_samples=100, n_features=1, n_grps=1,
working_dir=None, plot=False, random_state=None, noise=None):
"""
    This function simulates synthetic regression data for testing nispat methods.
    - Inputs:
        - method: type of simulated function: 'linear', 'non-linear', or 'combined'.
- n_samples: number of samples in each group of the training and test sets.
If it is an int then the same sample number will be used for all groups.
It can be also a list of size of n_grps that decides the number of samples
in each group (default=100).
- n_features: A positive integer that decides the number of features
(default=1).
- n_grps: A positive integer that decides the number of groups in data
(default=1).
- working_dir: Directory to save data (default=None).
- plot: Boolean to plot the simulated training data (default=False).
- random_state: random state for generating random numbers (Default=None).
        - noise: Type of added noise to the data. The options are 'gaussian',
          'exponential', 'hetero_gaussian', and 'hetero_gaussian_smaller'
          (the default is None).
- Outputs:
- X_train, Y_train, grp_id_train, X_test, Y_test, grp_id_test, coef
"""
if isinstance(n_samples, int):
n_samples = [n_samples for i in range(n_grps)]
X_train, Y_train, X_test, Y_test = [], [], [], []
grp_id_train, grp_id_test = [], []
coef = []
for i in range(n_grps):
bias = np.random.randint(-10, high=10)
if method == 'linear':
X_temp, Y_temp, coef_temp = make_regression(n_samples=n_samples[i]*2,
n_features=n_features, n_targets=1,
noise=10 * np.random.rand(), bias=bias,
n_informative=1, coef=True,
random_state=random_state)
elif method == 'non-linear':
X_temp = np.random.randint(-2,6,[2*n_samples[i], n_features]) \
+ np.random.randn(2*n_samples[i], n_features)
Y_temp = X_temp[:,0] * 20 * np.random.rand() + np.random.randint(10,100) \
* np.sin(2 * np.random.rand() + 2 * np.pi /5 * X_temp[:,0])
coef_temp = 0
elif method == 'combined':
X_temp = np.random.randint(-2,6,[2*n_samples[i], n_features]) \
+ np.random.randn(2*n_samples[i], n_features)
Y_temp = (X_temp[:,0]**3) * np.random.uniform(0, 0.5) \
+ X_temp[:,0] * 20 * np.random.rand() \
+ np.random.randint(10, 100)
coef_temp = 0
else:
raise ValueError("Unknow method. Please specify valid method among \
'linear' or 'non-linear'.")
coef.append(coef_temp/100)
X_train.append(X_temp[:X_temp.shape[0]//2])
Y_train.append(Y_temp[:X_temp.shape[0]//2]/100)
X_test.append(X_temp[X_temp.shape[0]//2:])
Y_test.append(Y_temp[X_temp.shape[0]//2:]/100)
grp_id = np.repeat(i, X_temp.shape[0])
grp_id_train.append(grp_id[:X_temp.shape[0]//2])
grp_id_test.append(grp_id[X_temp.shape[0]//2:])
if noise == 'hetero_gaussian':
t = np.random.randint(5,10)
Y_train[i] = Y_train[i] + np.random.randn(Y_train[i].shape[0]) / t \
* np.log(1 + np.exp(X_train[i][:,0]))
Y_test[i] = Y_test[i] + np.random.randn(Y_test[i].shape[0]) / t \
* np.log(1 + np.exp(X_test[i][:,0]))
elif noise == 'gaussian':
t = np.random.randint(3,10)
Y_train[i] = Y_train[i] + np.random.randn(Y_train[i].shape[0])/t
Y_test[i] = Y_test[i] + np.random.randn(Y_test[i].shape[0])/t
elif noise == 'exponential':
t = np.random.randint(1,3)
Y_train[i] = Y_train[i] + np.random.exponential(1, Y_train[i].shape[0]) / t
Y_test[i] = Y_test[i] + np.random.exponential(1, Y_test[i].shape[0]) / t
elif noise == 'hetero_gaussian_smaller':
t = np.random.randint(5,10)
Y_train[i] = Y_train[i] + np.random.randn(Y_train[i].shape[0]) / t \
* np.log(1 + np.exp(0.3 * X_train[i][:,0]))
Y_test[i] = Y_test[i] + np.random.randn(Y_test[i].shape[0]) / t \
* np.log(1 + np.exp(0.3 * X_test[i][:,0]))
X_train = np.vstack(X_train)
X_test = np.vstack(X_test)
Y_train = np.concatenate(Y_train)
Y_test = np.concatenate(Y_test)
grp_id_train = np.expand_dims(np.concatenate(grp_id_train), axis=1)
grp_id_test = np.expand_dims(np.concatenate(grp_id_test), axis=1)
for i in range(n_features):
plt.figure()
for j in range(n_grps):
plt.scatter(X_train[grp_id_train[:,0]==j,i],
Y_train[grp_id_train[:,0]==j,], label='Group ' + str(j))
plt.xlabel('X' + str(i))
plt.ylabel('Y')
plt.legend()
if working_dir is not None:
if not os.path.isdir(working_dir):
os.mkdir(working_dir)
with open(os.path.join(working_dir ,'trbefile.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(grp_id_train),file)
with open(os.path.join(working_dir ,'tsbefile.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(grp_id_test),file)
with open(os.path.join(working_dir ,'X_train.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(X_train),file)
with open(os.path.join(working_dir ,'X_test.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(X_test),file)
with open(os.path.join(working_dir ,'Y_train.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(Y_train),file)
with open(os.path.join(working_dir ,'Y_test.pkl'), 'wb') as file:
pickle.dump(pd.DataFrame(Y_test),file)
return X_train, Y_train, grp_id_train, X_test, Y_test, grp_id_test, coef
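# Illustrative sketch (not part of the original module): generating a small
# two-group linear dataset with simulate_data.  Argument values are arbitrary
# examples, not recommended settings.
def _example_simulate_data():
    X_train, Y_train, grp_id_train, X_test, Y_test, grp_id_test, coef = \
        simulate_data(method='linear', n_samples=50, n_features=1, n_grps=2,
                      plot=False, random_state=42, noise='gaussian')
    # Each output has one row (or entry) per sample; grp_id_* hold the group
    # index of every sample and coef holds the scaled generating coefficients.
    return X_train.shape, Y_train.shape, grp_id_train.shape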
def divergence_plot(nm, ylim=None):
if nm.hbr.configs['n_chains'] > 1 and nm.hbr.model_type != 'nn':
a = pm.summary(nm.hbr.trace).round(2)
plt.figure()
plt.hist(a['r_hat'],10)
plt.title('Gelman-Rubin diagnostic for divergence')
divergent = nm.hbr.trace['diverging']
tracedf = pm.trace_to_dataframe(nm.hbr.trace)
_, ax = plt.subplots(2, 1, figsize=(15, 4), sharex=True, sharey=True)
ax[0].plot(tracedf.values[divergent == 0].T, color='k', alpha=.05)
ax[0].set_title('No Divergences', fontsize=10)
ax[1].plot(tracedf.values[divergent == 1].T, color='C2', lw=.5, alpha=.5)
ax[1].set_title('Divergences', fontsize=10)
plt.ylim(ylim)
plt.xticks(range(tracedf.shape[1]), list(tracedf.columns))
plt.xticks(rotation=90, fontsize=7)
plt.tight_layout()
    plt.show()
| gpl-3.0 |
mlyundin/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
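# Illustrative sketch (not part of the original tests): compute_kernel_slow
# brute-forces the same density that KernelDensity evaluates with tree-based
# algorithms, so the two should agree closely on small data.  The sizes and
# bandwidth below are arbitrary.
def _example_compare_slow_vs_kde():
    rng = np.random.RandomState(42)
    X = rng.randn(30, 2)
    Y = rng.randn(5, 2)
    dens_slow = compute_kernel_slow(Y, X, kernel='gaussian', h=0.5)
    kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
    dens_tree = np.exp(kde.score_samples(Y))
    assert_allclose(dens_slow, dens_tree, rtol=1e-7)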
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
        # draw a sample from the fitted KDE
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
yyjiang/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically estimated using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
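# Illustrative sketch (not part of the original tests): smacof also returns
# the final stress value, which measures how well the embedding distances
# reproduce the input dissimilarities (lower is better).  The random_state
# below is arbitrary.
def _example_smacof_stress():
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    X, stress = mds.smacof(sim, n_components=2, random_state=0)
    return stress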
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
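# Illustrative sketch (not part of the original tests): on the sparse coded
# signal built above, orthogonal_mp recovers a coefficient vector for a single
# target with at most n_nonzero_coefs non-zero entries.
def _example_orthogonal_mp():
    coef = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=n_nonzero_coefs)
    assert np.count_nonzero(coef) <= n_nonzero_coefs
    return coef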
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/metrics/tests/test_classification.py | 42 | 52642 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
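# Illustrative sketch (not part of the original tests): the helper above is
# used throughout this file to obtain aligned (y_true, y_pred, probas_pred)
# arrays from a fixed train/test split of the iris data.
def _example_make_prediction():
    y_true, y_pred, probas_pred = make_prediction(binary=True)
    # 50 held-out samples; probas_pred holds P(class == 1) for each of them.
    assert_equal(y_true.shape, (50,))
    assert_equal(y_pred.shape, (50,))
    return accuracy_score(y_true, y_pred)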
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
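# Illustrative sketch (not part of the original tests): because ties are
# resolved by score value rather than by sample order, swapping the labels of
# the two tied samples leaves the average precision unchanged (and below 1).
def _example_average_precision_ties():
    ap_a = average_precision_score([0, 1, 1], [.5, .5, .6])
    ap_b = average_precision_score([1, 0, 1], [.5, .5, .6])
    assert_almost_equal(ap_a, ap_b)
    return ap_a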
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 1, 1]
# |y3 union y4 | = [2, 1, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
        # Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
        # Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
        # Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
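# Illustrative sketch (not part of the original tests): the per-sample losses
# hand-built above follow max(0, 1 - s_true + max_{k != true} s_k), i.e. the
# multiclass hinge loss; the helper below recomputes it from any score matrix
# (y_true is assumed to hold integer class indices, as in the tests above).
def _example_multiclass_hinge(pred_decision, y_true):
    pred_decision = np.asarray(pred_decision, dtype=float)
    losses = []
    for scores, true_label in zip(pred_decision, y_true):
        other_best = np.max(np.delete(scores, true_label))
        losses.append(max(0.0, 1.0 - scores[true_label] + other_best))
    return np.mean(losses)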
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
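    # The Brier score is the mean squared difference between the predicted
    # probability and the actual outcome, so it can be checked against a
    # plain squared-error computation on the arrays above.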
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
feranick/GES_AT | Other/Cursors/cursors.py | 1 | 1136 | import sys
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from matplotlib.widgets import Cursor
class Window(QtWidgets.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.figure = plt.figure(facecolor='white')
self.canvas = FigureCanvas(self.figure)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.canvas)
self.setLayout(layout)
''' plot some random stuff '''
ax = self.figure.add_subplot(111)
self.ax = ax
ax.plot([1,2])
# Set cursor
cursor = Cursor(self.ax, useblit=False, color='red', linewidth=1)
############## The added part: #############
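        # With useblit=False the cursor normally follows mouse motion only;
        # forwarding button-press events to Cursor.onmove below makes the
        # crosshair also jump to wherever the user clicks.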
def onclick(event):
cursor.onmove(event)
self.canvas.mpl_connect('button_press_event', onclick)
############################################
self.canvas.draw()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
main = Window()
main.show()
sys.exit(app.exec_())
| gpl-3.0 |
jayflo/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
priyankadeswal/network-address-translator | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
cwhanse/pvlib-python | benchmarks/benchmarks/irradiance.py | 3 | 2710 | """
ASV benchmarks for irradiance.py
"""
import pandas as pd
from pvlib import irradiance, location
class Irradiance:
def setup(self):
self.times = pd.date_range(start='20180601', freq='1min',
periods=14400)
self.days = pd.date_range(start='20180601', freq='d', periods=30)
self.location = location.Location(40, -80)
self.solar_position = self.location.get_solarposition(self.times)
self.clearsky_irradiance = self.location.get_clearsky(self.times)
self.tilt = 20
self.azimuth = 180
self.aoi = irradiance.aoi(self.tilt, self.azimuth,
self.solar_position.apparent_zenith,
self.solar_position.azimuth)
def time_get_extra_radiation(self):
irradiance.get_extra_radiation(self.days)
def time_aoi(self):
irradiance.aoi(self.tilt, self.azimuth,
self.solar_position.apparent_zenith,
self.solar_position.azimuth)
def time_aoi_projection(self):
irradiance.aoi_projection(self.tilt, self.azimuth,
self.solar_position.apparent_zenith,
self.solar_position.azimuth)
def time_get_ground_diffuse(self):
irradiance.get_ground_diffuse(self.tilt, self.clearsky_irradiance.ghi)
def time_get_total_irradiance(self):
irradiance.get_total_irradiance(self.tilt, self.azimuth,
self.solar_position.apparent_zenith,
self.solar_position.azimuth,
self.clearsky_irradiance.dni,
self.clearsky_irradiance.ghi,
self.clearsky_irradiance.dhi)
def time_disc(self):
irradiance.disc(self.clearsky_irradiance.ghi,
self.solar_position.apparent_zenith,
self.times)
def time_dirint(self):
irradiance.dirint(self.clearsky_irradiance.ghi,
self.solar_position.apparent_zenith,
self.times)
def time_dirindex(self):
irradiance.dirindex(self.clearsky_irradiance.ghi,
self.clearsky_irradiance.ghi,
self.clearsky_irradiance.dni,
self.solar_position.apparent_zenith,
self.times)
def time_erbs(self):
irradiance.erbs(self.clearsky_irradiance.ghi,
self.solar_position.apparent_zenith,
self.times)
| bsd-3-clause |
bigfootproject/OSMEF | data_processing/graphs/cpu_histogram.py | 1 | 3477 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import json
data = json.load(open("../../osmef/data.json"))
N = 10
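# N scenarios are compared side by side; the lists below collect the mean
# and standard deviation of receiver (rx) and transmitter (tx) CPU usage
# taken from the single-connection (c=1) measurements in data.json.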
rxMeans = []
rxMeans.append(data["localhost"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_localhost"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_host"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["host_vm"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["host_to_host"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["host_to_host_gre"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_to_vm_1"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_to_vm_2"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_to_vm_3"]["c=1"]["rx.cpu"]["avg"])
rxMeans.append(data["vm_to_vm_4"]["c=1"]["rx.cpu"]["avg"])
rxStd = []
rxStd.append(data["localhost"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_localhost"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_host"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["host_vm"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["host_to_host"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["host_to_host_gre"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_to_vm_1"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_to_vm_2"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_to_vm_3"]["c=1"]["rx.cpu"]["std"])
rxStd.append(data["vm_to_vm_4"]["c=1"]["rx.cpu"]["std"])
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, rxMeans, width, color='r', yerr=rxStd)
txMeans = []
txMeans.append(data["localhost"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_localhost"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_host"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["host_vm"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["host_to_host"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["host_to_host_gre"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_to_vm_1"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_to_vm_2"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_to_vm_3"]["c=1"]["tx.cpu"]["avg"])
txMeans.append(data["vm_to_vm_4"]["c=1"]["tx.cpu"]["avg"])
txStd = []
txStd.append(data["localhost"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_localhost"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_host"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["host_vm"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["host_to_host"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["host_to_host_gre"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_to_vm_1"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_to_vm_2"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_to_vm_3"]["c=1"]["tx.cpu"]["std"])
txStd.append(data["vm_to_vm_4"]["c=1"]["tx.cpu"]["std"])
rects2 = ax.bar(ind+width, txMeans, width, color='y', yerr=txStd)
# add some text for labels, title and axis ticks
ax.set_ylabel('CPU usage %')
ax.set_title('CPU usage with 1 connection')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('localhost', 'VM localhost', 'VM to host', 'host to VM', 'host to host', 'host to host GRE', 'VM to VM 1', 'VM to VM 2', 'VM to VM 3', 'VM to VM 4') )
ax.legend( (rects1[0], rects2[0]), ('Receiver', 'Transmitter') )
#def autolabel(rects):
# # attach some text labels
# for rect in rects:
# height = rect.get_height()
# ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
# ha='center', va='bottom')
#
#autolabel(rects1)
#autolabel(rects2)
fig.autofmt_xdate()
plt.savefig("cpu_histogram.pdf")
| apache-2.0 |
yunfeilu/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format, as that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-filter/examples/synth_to_chan.py | 13 | 3633 | #!/usr/bin/env python
#
# Copyright 2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
from gnuradio import filter
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = gr.sig_source_f(fs, gr.GR_SIN_WAVE, fi, 1)
fm = blks2.nbfm_tx (fs, 4*fs, max_dev=10000, tau=75e-6)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
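    # The polyphase synthesizer combines the narrowband FM channels into a
    # single wideband signal; the matching channelizer later splits that
    # signal back into its individual channels.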
noise_level = 0.01
head = gr.head(gr.sizeof_gr_complex, N)
noise = gr.noise_source_c(gr.GR_GAUSSIAN, noise_level)
addnoise = gr.add_cc()
snk_synth = gr.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(gr.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
Barmaley-exe/scikit-learn | doc/conf.py | 16 | 8442 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
jhonatanoliveira/pgmpy | pgmpy/tests/test_estimators/test_BaseEstimator.py | 2 | 2903 | import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import BaseEstimator
class TestBaseEstimator(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0], 'D': ['X', 'Y', 'Z']})
self.d2 = pd.DataFrame(data={'A': [0, np.NaN, 1], 'B': [0, 1, 0], 'C': [1, 1, np.NaN], 'D': [np.NaN, 'Y', np.NaN]})
self.titanic_data = pd.read_csv('pgmpy/tests/test_estimators/testdata/titanic_train.csv')
def test_state_count(self):
e = BaseEstimator(self.d1)
self.assertEqual(e.state_counts('A').values.tolist(), [[2], [1]])
self.assertEqual(e.state_counts('C', ['A', 'B']).values.tolist(),
[[0., 0., 1., 0.], [1., 1., 0., 0.]])
def test_missing_data(self):
e = BaseEstimator(self.d2, state_names={'C': [0, 1]}, complete_samples_only=False)
self.assertEqual(e.state_counts('A', complete_samples_only=True).values.tolist(), [[0], [0]])
self.assertEqual(e.state_counts('A').values.tolist(), [[1], [1]])
self.assertEqual(e.state_counts('C', parents=['A', 'B'], complete_samples_only=True).values.tolist(),
[[0, 0, 0, 0], [0, 0, 0, 0]])
self.assertEqual(e.state_counts('C', parents=['A', 'B']).values.tolist(),
[[0, 0, 0, 0], [1, 0, 0, 0]])
def test_test_conditional_independence(self):
data = pd.DataFrame(np.random.randint(0, 2, size=(1000, 4)), columns=list('ABCD'))
data['E'] = data['A'] + data['B'] + data['C']
est = BaseEstimator(data)
self.assertGreater(est.test_conditional_independence('A', 'C')[1], 0.01) # independent
self.assertGreater(est.test_conditional_independence('A', 'B', 'D')[1], 0.01) # independent
self.assertLess(est.test_conditional_independence('A', 'B', ['D', 'E'])[1], 0.01) # dependent
def test_test_conditional_independence_titanic(self):
est = BaseEstimator(self.titanic_data)
np.testing.assert_almost_equal(est.test_conditional_independence('Embarked', 'Sex'),
(13.355630515001746, 0.020264556044311655, True))
np.testing.assert_almost_equal(est.test_conditional_independence('Pclass', 'Survived', ['Embarked']),
(96.403283942888635, 4.1082315854166553e-13, True))
np.testing.assert_almost_equal(est.test_conditional_independence('Embarked', 'Survived', ["Sex", "Pclass"]),
(21.537481934494085, 0.96380273702382602, True))
# insufficient data test commented out, because generates warning
# self.assertEqual(est.test_conditional_independence('Sex', 'Survived', ["Age", "Embarked"]),
# (235.51133052530713, 0.99999999683394869, False))
def tearDown(self):
del self.d1
| mit |
mehdidc/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
ebressert/ScipyNumpy_book_examples | python_examples/scikits_411_ex1.py | 2 | 1604 | import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import skimage.filter as skif
# Generating data points with a non-uniform background
x = np.random.uniform(low=0, high=100, size=20).astype(int)
y = np.random.uniform(low=0, high=100, size=20).astype(int)
# Creating image with non-uniform background
func = lambda x, y: x ** 2 + y ** 2
grid_x, grid_y = np.mgrid[-1:1:100j, -2:2:100j]
bkg = func(grid_x, grid_y)
bkg = bkg / np.max(bkg)
# Creating points
clean = np.zeros((100, 100))
clean[(x, y)] += 5
clean = ndimage.gaussian_filter(clean, 3)
clean = clean / np.max(clean)
# Combining both the non-uniform background
# and points
fimg = bkg + clean
fimg = fimg / np.max(fimg)
# Defining minimum neighboring size of objects
block_size = 3
# Adaptive threshold function which returns image
# map of structures that are different relative to
# background
adaptive_cut = skif.threshold_adaptive(fimg, block_size, offset=0)
# Global threshold
global_thresh = skif.threshold_otsu(fimg)
global_cut = fimg > global_thresh
# Creating figure to highlight difference between
# adaptive and global threshold methods
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
ax1 = fig.add_subplot(131)
ax1.imshow(fimg)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
ax2 = fig.add_subplot(132)
ax2.imshow(global_cut)
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax3 = fig.add_subplot(133)
ax3.imshow(adaptive_cut)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
fig.savefig('scikits_411_ex1.pdf', bbox_inches='tight')
| mit |
ldirer/scikit-learn | sklearn/feature_selection/tests/test_base.py | 98 | 3681 | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
rothnic/bokeh | bokeh/charts/builder/tests/test_timeseries_builder.py | 33 | 2825 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import TimeSeries
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestTimeSeries(unittest.TestCase):
def test_supported_input(self):
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
groups = ['python', 'pypy', 'jython']
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
ts = create_chart(TimeSeries, _xy, index='Date')
builder = ts._builders[0]
self.assertEqual(builder._groups, groups)
assert_array_equal(builder._data['x_python'], dts)
assert_array_equal(builder._data['x_pypy'], dts)
assert_array_equal(builder._data['x_jython'], dts)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(TimeSeries, _xy, index=dts)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x_0'], dts)
assert_array_equal(builder._data['x_1'], dts)
assert_array_equal(builder._data['x_2'], dts)
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
jottenlips/aima-python | submissions/Miles/myNN.py | 13 | 3799 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
'''
My bayes into Neural Networks
'''
from submissions.Miles import food
class DataFrame:
data = [] # grid of floating point numbers
feature_names = [] # column names
target = [] # list of the target value
target_names = [] # target labels
foodFF = DataFrame() # what I pass on to examples
foodFF.data = []
targetData = []
'''
Extract data from the CORGIS food.
'''
food = food.get_reports()
for info in food:
try:
# item = str(info["Category"])
item = float(info["Data"]["Fat"]["Saturated Fat"])
targetData.append(item)
fiber = float(info["Data"]["Fiber"])
carbohydrate = float(info["Data"]["Carboydrate"]) # they misspelled carbohydrates LOL
water = float(info["Data"]["Water"])
vitamin = float(info["Data"]["Vitamins"]["Vitamin C"])
foodFF.data.append([fiber, carbohydrate, water, vitamin])
except:
traceback.print_exc()
foodFF.feature_names = [
'Fiber',
'Carbohydrates',
'Water',
'Vitamin C',
]
'''
Build the target list,
one entry for each row in the input frame.
The Naive Bayesian network is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking the saturated-fat percentage into two
arbitrary segments.
'''
foodFF.target = []
def foodTarget(percentage):
if percentage > 10:
return 1
return 0
for item2 in targetData:
# choose the target
target_t = foodTarget(item2)
foodFF.target.append(target_t)
# comparing the fat content of a food to the other contents of the same food
foodFF.target_names = [
'Saturated Fat is <= 10%',
'Saturated Fat is > 10%',
# 'Butter',
# 'Milk',
# 'Cheese'
]
Examples = {
'Food': foodFF,
}
# start the new info for neural networks
'''
Make a custom classifier,
'''
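# Non-default choices worth noting: the identity activation keeps the model
# linear, 'lbfgs' is a full-batch solver that generally works well on small
# datasets, and max_iter is raised to 1000 to give it room to converge.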
mlpc = MLPClassifier(
hidden_layer_sizes = (100,),
activation = 'identity',
solver='lbfgs', # 'adam',
# alpha = 0.0001,
batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
foodScaled = DataFrame()
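# setupScales records the per-column minimum and maximum of a data grid;
# scaleGrid then applies min-max normalization so every feature value is
# mapped into the [0, 1] range.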
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(foodFF.data)
foodScaled.data = scaleGrid(foodFF.data)
foodScaled.feature_names = foodFF.feature_names
foodScaled.target = foodFF.target
foodScaled.target_names = foodFF.target_names
Examples = {
'FoodDefault': {
'frame': foodFF,
},
'FoodSGD': {
'frame': foodFF,
'mlpc': mlpc
},
'FoodScaled': {
'frame': foodScaled,
},
} | mit |
scarrazza/smpdf | extrascripts/smallthing.py | 1 | 1171 | import shelve
from smpdflib.initialization import init_app
if __name__ == '__main__':
init_app()
import matplotlib.pyplot as plt
from smpdflib.core import make_observable, PDF, produce_results, results_table
from smpdflib.plots import plot_bindist
#Db folder should exist...
db = shelve.open('db/db')
to_plot = {make_observable('data/applgrid/CMSWCHARM-WpCb-eta4.root', order=1) : 4,
make_observable('data/applgrid/APPLgrid-LHCb-Z0-ee_arXiv1212.4260-eta34.root', order=1) : 8,
}
pdfs = [PDF('MC900_nnlo', label="legendtext"),
PDF('CMC100_nnlo', label="other"),
PDF('MCH_nnlo_100', label="another")
]
if __name__ == '__main__':
for obs, bin in to_plot.items():
results = produce_results(pdfs, [obs], db=db)
obs_table = results_table(results)
for (obs,b), fig in plot_bindist(obs_table, bin, base_pdf = pdfs[0]):
ax = fig.axes[0]
ax.set_xlabel("My X label")
ax.set_ylabel("MY Y label")
ax.set_title("My title")
path = "%s_bin_%s.pdf" % (obs, b+1)
fig.savefig(path)
plt.close(fig)
| gpl-2.0 |
nomadcube/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
hammerlab/pyensembl | test/test_memory_cache.py | 1 | 3401 | from __future__ import absolute_import
import tempfile
from pyensembl import MemoryCache
import pandas as pd
from nose.tools import raises
memory_cache = MemoryCache()
class Counter(object):
"""
Use this class to count how many times a function gets called by
cached_object and cached_dataframe.
"""
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
return self.count
def increment_dataframe(self):
value = self.increment()
return pd.DataFrame({'x': [value]})
def test_cached_object_with_tempfile():
"""
test_cached_object_with_tempfile : A temporary file exists before
    calling into compute_cache.cached_object but is empty; it should be treated
    as if the result has never been computed before (rather than trying to load
the empty file).
"""
counter = Counter()
with tempfile.NamedTemporaryFile() as f:
# call repeatedly to test the hot and cold cache logic
result = memory_cache.cached_object(
f.name, compute_fn=counter.increment)
assert result == 1, "Expected result=1, got %s" % (result,)
assert counter.count == 1, \
"Expected compute_fn to be called once, got %s" % (counter.count,)
def test_cached_dataframe_with_tempfile():
"""
test_cached_dataframe_with_tempfile : A temporary file exists before
    calling into compute_cache.cached_dataframe but is empty; it
    should be treated as if the result has never been computed before
(rather than trying to load the empty file).
"""
counter = Counter()
with tempfile.NamedTemporaryFile(suffix='.csv') as f:
# call repeatedly to test hot and cold cache logic
for _ in range(2):
df = memory_cache.cached_dataframe(
f.name, compute_fn=counter.increment_dataframe)
# get counter value from inside of dataframe
result = df['x'].ix[0]
assert result == 1, \
"Expected result=1, got %s" % (result,)
assert counter.count == 1, \
"Expected compute_fn to be called once, got %s" % (
counter.count,)
def test_cached_dataframe_returns_correct_type():
def make_a_dataframe():
return pd.DataFrame({'x': [0, 1, 2]})
with tempfile.NamedTemporaryFile(suffix='.csv') as f:
# call repeatedly to test the cold and hot cache logic
for _ in range(2):
df = memory_cache.cached_dataframe(
f.name, compute_fn=make_a_dataframe)
assert isinstance(df, pd.DataFrame), \
"Expected DataFrame, got %s : %s" % (df, type(df))
def test_cached_object_with_list_returns_correct_type():
def make_a_list():
return [1, 2, 3]
with tempfile.NamedTemporaryFile() as f:
# call repeatedly to test the cold and hot cache logic
for _ in range(2):
df = memory_cache.cached_object(
f.name, compute_fn=make_a_list)
assert isinstance(df, list), \
"Expected list, got %s : %s" % (df, type(df))
@raises(Exception)
def test_dataframe_path_must_be_csv():
# compute_cache should raise an exception when filename doesn't
# end with .csv extension
memory_cache.cached_dataframe(
csv_path="tempfile_not_csv",
compute_fn=lambda _: pd.DataFrame({'x': []}))
| apache-2.0 |
bloyl/mne-python | tutorials/time-freq/50_ssvep.py | 10 | 27044 | """
.. _tut-ssvep:
==========================================================
Frequency-tagging: Basic analysis of an SSVEP/vSSR dataset
==========================================================
In this tutorial we compute the frequency spectrum and quantify signal-to-noise
ratio (SNR) at a target frequency in EEG data recorded during fast periodic
visual stimulation (FPVS) at 12 Hz and 15 Hz in different trials.
Extracting SNR at stimulation frequency is a simple way to quantify frequency
tagged responses in MEEG (a.k.a. steady state visually evoked potentials,
SSVEP, or visual steady-state responses, vSSR in the visual domain,
or auditory steady-state responses, ASSR in the auditory domain).
For a general introduction to the method see
`Norcia et al. (2015) <https://doi.org/10.1167/15.6.4>`_ for the visual domain,
and `Picton et al. (2003) <https://doi.org/10.3109/14992020309101316>`_ for
the auditory domain.
**Data and outline:**
We use a simple example dataset with frequency tagged visual stimulation:
N=2 participants observed checkerboard patterns inverting with a constant
frequency of either 12.0 Hz or 15.0 Hz.
32-channel wet EEG was recorded.
(see :ref:`ssvep-dataset` for more information).
We will visualize both the power-spectral density (PSD) and the SNR
spectrum of the epoched data,
extract SNR at stimulation frequency,
plot the topography of the response,
and statistically separate 12 Hz and 15 Hz responses in the different trials.
Since the evoked response is mainly generated in early visual areas of the
brain, the statistical analysis will be carried out on an occipital
ROI.
.. contents:: Outline
:depth: 2
""" # noqa: E501
# Authors: Dominik Welke <[email protected]>
# Evgenii Kalenkovich <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
import numpy as np
from scipy.stats import ttest_rel
###############################################################################
# Data preprocessing
# ------------------
# Due to a generally high SNR in SSVEP/vSSR, typical preprocessing steps
# are considered optional. This doesn't mean that proper cleaning would not
# increase your signal quality!
#
# * Raw data have FCz reference, so we will apply common-average rereferencing.
#
# * We will apply a 0.1 highpass filter.
#
# * Lastly, we will cut the data in 20 s epochs corresponding to the trials.
#
#
# Load raw data
data_path = mne.datasets.ssvep.data_path()
bids_fname = data_path + '/sub-02/ses-01/eeg/sub-02_ses-01_task-ssvep_eeg.vhdr'
raw = mne.io.read_raw_brainvision(bids_fname, preload=True, verbose=False)
raw.info['line_freq'] = 50.
# Set montage
montage = mne.channels.make_standard_montage('easycap-M1')
raw.set_montage(montage, verbose=False)
# Set common average reference
raw.set_eeg_reference('average', projection=False, verbose=False)
# Apply bandpass filter
raw.filter(l_freq=0.1, h_freq=None, fir_design='firwin', verbose=False)
# Construct epochs
event_id = {
'12hz': 255,
'15hz': 155
}
events, _ = mne.events_from_annotations(raw, verbose=False)
raw.info["events"] = events
tmin, tmax = -1., 20. # in s
baseline = None
epochs = mne.Epochs(
raw, events=events,
event_id=[event_id['12hz'], event_id['15hz']], tmin=tmin,
tmax=tmax, baseline=baseline, verbose=False)
###############################################################################
# Frequency analysis
# ------------------
# Now we compute the frequency spectrum of the EEG data.
# You will already see the peaks at the stimulation frequencies and some of
# their harmonics, without any further processing.
#
# The 'classical' PSD plot will be compared to a plot of the SNR spectrum.
# SNR will be computed as a ratio of the power in a given frequency bin
# to the average power in its neighboring bins.
# This procedure has two advantages over using the raw PSD:
#
# * it normalizes the spectrum and accounts for 1/f power decay.
#
# * power modulations which are not very narrow band will disappear.
#
# Calculate power spectral density (PSD)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The frequency spectrum will be computed using Fast Fourier transform (FFT).
# This seems to be common practice in the steady-state literature and is
# based on the exact knowledge of the stimulus and the assumed response -
# especially in terms of its stability over time.
# For a discussion see e.g.
# `Bach & Meigen (1999) <https://doi.org/10.1023/A:1002648202420>`_
#
# We will exclude the first second of each trial from the analysis:
#
# * steady-state responses often take a while to stabilize, and the
# transient phase in the beginning can distort the signal estimate.
#
# * this section of data is expected to be dominated by responses related to
# the stimulus onset, and we are not interested in this.
#
# In MNE, we compute a plain FFT as a special case of Welch's method, with only a
# single Welch window spanning the entire trial and no specific windowing
# function (i.e. applying a boxcar window).
#
tmin = 1.
tmax = 20.
fmin = 1.
fmax = 90.
sfreq = epochs.info['sfreq']
psds, freqs = mne.time_frequency.psd_welch(
epochs,
n_fft=int(sfreq * (tmax - tmin)),
n_overlap=0, n_per_seg=None,
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax,
window='boxcar',
verbose=False)
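# The result (a note added for orientation): ``psds`` is an array of shape
# (n_epochs, n_channels, n_freqs) and ``freqs`` holds the center frequency
# of every bin.
print(psds.shape)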
###############################################################################
# Calculate signal to noise ratio (SNR)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# SNR - as we define it here - is a measure of relative power:
# it's the ratio of power in a given frequency bin - the 'signal' -
# to a 'noise' baseline - the average power in the surrounding frequency bins.
# This approach was initially proposed by
# `Meigen & Bach (1999) <https://doi.org/10.1023/A:1002097208337>`_
#
# Hence, we need to set some parameters for this baseline - how many
# neighboring bins should be taken for this computation, and whether we want to
# skip the direct neighbors (this can make sense if the stimulation frequency is
# not perfectly constant, or if frequency bands are very narrow).
#
# The function below does what we want.
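# In symbols, for a frequency bin ``f`` (a sketch of the computation
# implemented below, with ``k = noise_n_neighbor_freqs`` and
# ``s = noise_skip_neighbor_freqs``)::
#
#     SNR(f) = PSD(f) / mean(PSD(f +/- i)),  for i in s+1, ..., s+k
#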
#
def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1):
"""Compute SNR spectrum from PSD spectrum using convolution.
Parameters
----------
psd : ndarray, shape ([n_trials, n_channels,] n_frequency_bins)
Data object containing PSD values. Works with arrays as produced by
MNE's PSD functions or channel/trial subsets.
noise_n_neighbor_freqs : int
Number of neighboring frequencies used to compute noise level.
        Increment by one to add one frequency bin on both sides.
noise_skip_neighbor_freqs : int
        Set this >=1 if you want to exclude the immediately neighboring
        frequency bins in the noise level calculation.
Returns
-------
snr : ndarray, shape ([n_trials, n_channels,] n_frequency_bins)
Array containing SNR for all epochs, channels, frequency bins.
        NaN for frequencies at the edges that do not have enough neighbors on
one side to calculate SNR.
"""
# Construct a kernel that calculates the mean of the neighboring
# frequencies
averaging_kernel = np.concatenate((
np.ones(noise_n_neighbor_freqs),
np.zeros(2 * noise_skip_neighbor_freqs + 1),
np.ones(noise_n_neighbor_freqs)))
averaging_kernel /= averaging_kernel.sum()
# Calculate the mean of the neighboring frequencies by convolving with the
# averaging kernel.
mean_noise = np.apply_along_axis(
lambda psd_: np.convolve(psd_, averaging_kernel, mode='valid'),
axis=-1, arr=psd
)
    # The mean is not defined at the edges so we will pad it with NaNs. The
# padding needs to be done for the last dimension only so we set it to
# (0, 0) for the other ones.
edge_width = noise_n_neighbor_freqs + noise_skip_neighbor_freqs
pad_width = [(0, 0)] * (mean_noise.ndim - 1) + [(edge_width, edge_width)]
mean_noise = np.pad(
mean_noise, pad_width=pad_width, constant_values=np.nan
)
return psd / mean_noise
###############################################################################
# Now we call the function to compute our SNR spectrum.
#
# As described above, we have to define two parameters.
#
# * how many noise bins do we want?
#
# * do we want to skip the n bins directly next to the target bin?
#
#
# Tweaking these parameters *can* drastically impact the resulting spectrum,
# but mainly if you choose extremes.
# E.g. if you'd skip very many neighboring bins, broad band power modulations
# (such as the alpha peak) should reappear in the SNR spectrum.
# On the other hand, if you skip none you might miss or smear peaks if the
# induced power is distributed over two or more frequency bins (e.g. if the
# stimulation frequency isn't perfectly constant, or you have very narrow
# bins).
#
# Here, we want to compare power at each bin with average power of the
# **three neighboring bins** (on each side) and **skip one bin** directly next
# to it.
#
snrs = snr_spectrum(psds, noise_n_neighbor_freqs=3,
noise_skip_neighbor_freqs=1)
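# Sanity check (added here, not part of the original analysis): the SNR array
# keeps the shape of the PSD array, since every PSD value is simply divided by
# its local noise estimate (edge bins become NaN).
assert snrs.shape == psds.shape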
##############################################################################
# Plot PSD and SNR spectra
# ^^^^^^^^^^^^^^^^^^^^^^^^
# Now we will plot grand average PSD (in blue) and SNR (in red) ± sd
# for every frequency bin.
# PSD is plotted on a log scale.
#
fig, axes = plt.subplots(2, 1, sharex='all', sharey='none', figsize=(8, 5))
freq_range = range(np.where(np.floor(freqs) == 1.)[0][0],
np.where(np.ceil(freqs) == fmax - 1)[0][0])
psds_plot = 10 * np.log10(psds)
psds_mean = psds_plot.mean(axis=(0, 1))[freq_range]
psds_std = psds_plot.std(axis=(0, 1))[freq_range]
axes[0].plot(freqs[freq_range], psds_mean, color='b')
axes[0].fill_between(
freqs[freq_range], psds_mean - psds_std, psds_mean + psds_std,
color='b', alpha=.2)
axes[0].set(title="PSD spectrum", ylabel='Power Spectral Density [dB]')
# SNR spectrum
snr_mean = snrs.mean(axis=(0, 1))[freq_range]
snr_std = snrs.std(axis=(0, 1))[freq_range]
axes[1].plot(freqs[freq_range], snr_mean, color='r')
axes[1].fill_between(
freqs[freq_range], snr_mean - snr_std, snr_mean + snr_std,
color='r', alpha=.2)
axes[1].set(
title="SNR spectrum", xlabel='Frequency [Hz]',
ylabel='SNR', ylim=[-2, 30], xlim=[fmin, fmax])
fig.show()
###############################################################################
# You can see that the peaks at the stimulation frequencies (12 Hz, 15 Hz)
# and their harmonics are visible in both plots (just like the line noise at
# 50 Hz).
# Yet, the SNR spectrum shows them more prominently as peaks from a
# noisy but more or less constant baseline of SNR = 1.
# You can further see that the SNR processing removes any broad-band power
# differences (such as the increased power in alpha band around 10 Hz),
# and also removes the 1/f decay in the PSD.
#
# Note that while the SNR plot implies the possibility of values below 0
# (mean minus sd) such values do not make sense.
# Each SNR value is a ratio of positive PSD values, and the lowest possible PSD
# value is 0 (negative Y-axis values in the upper panel only result from
# plotting PSD on a log scale).
# Hence SNR values must be positive and can minimally go towards 0.
#
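# (a quick numerical confirmation of that statement, added here)
print('minimum SNR in the data: %.2f' % np.nanmin(snrs))
###############################################################################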
# Extract SNR values at the stimulation frequency
# -----------------------------------------------
#
# Our processing yielded a large array: one SNR value for each trial x
# channel x frequency bin of the PSD array.
#
# For statistical analysis we obviously need to define specific subsets of this
# array. First of all, we are only interested in SNR at the stimulation
# frequency, but we also want to restrict the analysis to a spatial ROI.
# Lastly, answering your interesting research questions will probably rely on
# comparing SNR in different trials.
#
# Therefore we will have to find the indices of trials, channels, etc.
# Alternatively, one could subselect the trials already at the epoching step,
# using MNE's event information, and process different epoch structures
# separately.
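# (For reference, a pointer added here: the bonus analyses at the end of this
# example do exactly that, selecting trials via ``epochs[str(event_id['12hz'])]``.)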
#
# Let's only have a look at the trials with 12 Hz stimulation, for now.
#
# define stimulation frequency
stim_freq = 12.
###############################################################################
# Get index for the stimulation frequency (12 Hz)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Ideally, there would be a bin with the stimulation frequency exactly in its
# center. However, depending on your spectral decomposition this is not
# always the case. We will find the bin closest to it - this one should contain
# our frequency tagged response.
#
# find index of frequency bin closest to stimulation frequency
i_bin_12hz = np.argmin(abs(freqs - stim_freq))
# could be updated to support multiple frequencies
# for later, we will already find the 15 Hz bin and the 1st and 2nd harmonic
# for both.
i_bin_24hz = np.argmin(abs(freqs - 24))
i_bin_36hz = np.argmin(abs(freqs - 36))
i_bin_15hz = np.argmin(abs(freqs - 15))
i_bin_30hz = np.argmin(abs(freqs - 30))
i_bin_45hz = np.argmin(abs(freqs - 45))
###############################################################################
# Get indices for the different trial types
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
i_trial_12hz = np.where(epochs.events[:, 2] == event_id['12hz'])[0]
i_trial_15hz = np.where(epochs.events[:, 2] == event_id['15hz'])[0]
###############################################################################
# Get indices of EEG channels forming the ROI
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Define different ROIs
roi_vis = ['POz', 'Oz', 'O1', 'O2', 'PO3', 'PO4', 'PO7',
'PO8', 'PO9', 'PO10', 'O9', 'O10'] # visual roi
# Find corresponding indices using mne.pick_types()
picks_roi_vis = mne.pick_types(epochs.info, eeg=True, stim=False,
exclude='bads', selection=roi_vis)
###############################################################################
# Apply the subset, and check the result
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Now we simply need to apply our selection and check the result. Typically,
# one would report the grand average SNR over this subselection.
#
# In this tutorial we don't verify the presence of a neural response.
# This is commonly done in the ASSR literature where SNR is
# often lower. An F-test or Hotelling T² would be
# appropriate for this purpose.
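#
# A note on the indexing used in the next line (added for clarity): indexing
# with the trial index array and a single frequency bin at the same time uses
# NumPy's advanced indexing and yields a (n_trials, n_channels) array, from
# which we then select the ROI channels.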
snrs_target = snrs[i_trial_12hz, :, i_bin_12hz][:, picks_roi_vis]
print("sub 2, 12 Hz trials, SNR at 12 Hz")
print(f'average SNR (occipital ROI): {snrs_target.mean()}')
##############################################################################
# Topography of the vSSR
# ----------------------
# But wait...
# As described in the intro, we have decided *a priori* to work with average
# SNR over a subset of occipital channels - a visual region of interest (ROI)
# - because we expect SNR to be higher on these channels than in other
# channels.
#
# Let's check out, whether this was a good decision!
#
# Here we will plot average SNR for each channel location as a topoplot.
# Then we will do a simple paired T-test to check, whether average SNRs over
# the two sets of channels are significantly different.
#
# get average SNR at 12 Hz for ALL channels
snrs_12hz = snrs[i_trial_12hz, :, i_bin_12hz]
snrs_12hz_chaverage = snrs_12hz.mean(axis=0)
# plot SNR topography
fig, ax = plt.subplots(1)
mne.viz.plot_topomap(snrs_12hz_chaverage, epochs.info, vmin=1., axes=ax)
print("sub 2, 12 Hz trials, SNR at 12 Hz")
print("average SNR (all channels): %f" % snrs_12hz_chaverage.mean())
print("average SNR (occipital ROI): %f" % snrs_target.mean())
tstat_roi_vs_scalp = \
ttest_rel(snrs_target.mean(axis=1), snrs_12hz.mean(axis=1))
print("12 Hz SNR in occipital ROI is significantly larger than 12 Hz SNR over "
"all channels: t = %.3f, p = %f" % tstat_roi_vs_scalp)
##############################################################################
# We can see that (1) this participant indeed exhibits a cluster of channels
# with high SNR in the occipital region and (2) the average SNR over all
# channels is smaller than the average over the visual ROI computed above.
# The difference is statistically significant. Great!
#
# Such a topography plot can be a nice tool to explore and play with your data
# - e.g. you could try how changing the reference will affect the spatial
# distribution of SNR values.
#
# However, we also wanted to show this plot to point at a potential
# problem with frequency-tagged (or any other brain imaging) data:
# there are many channels and somewhere you will likely find some
# statistically significant effect.
# It is very easy - even unintentionally - to end up double-dipping or p-hacking.
# So if you want to work with an ROI or individual channels, ideally select
# them *a priori* - before collecting or looking at the data - and preregister
# this decision so people will believe you.
# If you end up selecting an ROI or individual channel for reporting *because
# this channel or ROI shows an effect*, e.g. in an exploratory analysis, this
# is also fine, but make it transparent and correct for multiple comparisons.
#
# Statistical separation of 12 Hz and 15 Hz vSSR
# ----------------------------------------------
# After this little detour into open science, let's move on and
# do the analyses we actually wanted to do:
#
# We will show that we can easily detect and discriminate the brain's responses
# in the trials with different stimulation frequencies.
#
# In the frequency and SNR spectrum plot above, we had all trials mixed up.
# Now we will extract 12 and 15 Hz SNR in both types of trials individually,
# and compare the values with a simple t-test.
# We will also extract SNR of the 1st and 2nd harmonic for both stimulation
# frequencies. These are often reported as well and can show interesting
# interactions.
#
snrs_roi = snrs[:, picks_roi_vis, :].mean(axis=1)
freq_plot = [12, 15, 24, 30, 36, 45]
color_plot = [
'darkblue', 'darkgreen', 'mediumblue', 'green', 'blue', 'seagreen'
]
xpos_plot = [-5. / 12, -3. / 12, -1. / 12, 1. / 12, 3. / 12, 5. / 12]
fig, ax = plt.subplots()
labels = ['12 Hz trials', '15 Hz trials']
x = np.arange(len(labels)) # the label locations
width = 0.6 # the width of the bars
res = dict()
# loop to plot SNRs at stimulation frequencies and harmonics
for i, f in enumerate(freq_plot):
# extract snrs
stim_12hz_tmp = \
snrs_roi[i_trial_12hz, np.argmin(abs(freqs - f))]
stim_15hz_tmp = \
snrs_roi[i_trial_15hz, np.argmin(abs(freqs - f))]
SNR_tmp = [stim_12hz_tmp.mean(), stim_15hz_tmp.mean()]
# plot (with std)
ax.bar(
x + width * xpos_plot[i], SNR_tmp, width / len(freq_plot),
yerr=np.std(SNR_tmp),
label='%i Hz SNR' % f, color=color_plot[i])
# store results for statistical comparison
res['stim_12hz_snrs_%ihz' % f] = stim_12hz_tmp
res['stim_15hz_snrs_%ihz' % f] = stim_15hz_tmp
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('SNR')
ax.set_title('Average SNR at target frequencies')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(['%i Hz' % f for f in freq_plot], title='SNR at:')
ax.set_ylim([0, 70])
ax.axhline(1, ls='--', c='r')
fig.show()
###############################################################################
# As you can easily see, there are striking differences between the two trial types.
# Let's verify this using a series of two-tailed paired T-Tests.
#
# Compare 12 Hz and 15 Hz SNR in trials after averaging over channels
tstat_12hz_trial_stim = \
ttest_rel(res['stim_12hz_snrs_12hz'], res['stim_12hz_snrs_15hz'])
print("12 Hz Trials: 12 Hz SNR is significantly higher than 15 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_stim)
tstat_12hz_trial_1st_harmonic = \
ttest_rel(res['stim_12hz_snrs_24hz'], res['stim_12hz_snrs_30hz'])
print("12 Hz Trials: 24 Hz SNR is significantly higher than 30 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_1st_harmonic)
tstat_12hz_trial_2nd_harmonic = \
ttest_rel(res['stim_12hz_snrs_36hz'], res['stim_12hz_snrs_45hz'])
print("12 Hz Trials: 36 Hz SNR is significantly higher than 45 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_2nd_harmonic)
print()
tstat_15hz_trial_stim = \
ttest_rel(res['stim_15hz_snrs_12hz'], res['stim_15hz_snrs_15hz'])
print("15 Hz trials: 12 Hz SNR is significantly lower than 15 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_stim)
tstat_15hz_trial_1st_harmonic = \
ttest_rel(res['stim_15hz_snrs_24hz'], res['stim_15hz_snrs_30hz'])
print("15 Hz trials: 24 Hz SNR is significantly lower than 30 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_1st_harmonic)
tstat_15hz_trial_2nd_harmonic = \
ttest_rel(res['stim_15hz_snrs_36hz'], res['stim_15hz_snrs_45hz'])
print("15 Hz trials: 36 Hz SNR is significantly lower than 45 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_2nd_harmonic)
##############################################################################
# Debriefing
# ----------
# So that's it, we hope you enjoyed our little tour through this example
# dataset.
#
# As you could see, frequency-tagging is a very powerful tool that can yield
# very high signal to noise ratios and effect sizes that enable you to detect
# brain responses even within a single participant and single trials of only
# a few seconds' duration.
#
# Bonus exercises
# ---------------
# For the overly motivated amongst you, let's see what else we can show with
# these data.
#
# Using the PSD function as implemented in MNE makes it very easy to change
# the amount of data that is actually used in the spectrum
# estimation.
#
# Here we employ this to show you some features of frequency
# tagging data that you might or might not have already intuitively expected:
#
# Effect of trial duration on SNR
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# First we will simulate shorter trials by taking only the first x seconds of
# our 20 s trials (2, 4, 6, 8, ..., 20 s), and compute the SNR using an FFT window
# that covers the entire epoch:
#
stim_bandwidth = .5
# shorten data and welch window
window_lengths = [i for i in range(2, 21, 2)]
window_snrs = [[]] * len(window_lengths)
for i_win, win in enumerate(window_lengths):
# compute spectrogram
windowed_psd, windowed_freqs = mne.time_frequency.psd_welch(
epochs[str(event_id['12hz'])],
n_fft=int(sfreq * win),
n_overlap=0, n_per_seg=None,
tmin=0, tmax=win,
window='boxcar',
fmin=fmin, fmax=fmax, verbose=False)
# define a bandwidth of 1 Hz around stimfreq for SNR computation
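    # (a rough reading of the next few lines, added for clarity:
    # ``skip_neighbor_freqs`` excludes bins falling within +/- half of
    # ``stim_bandwidth`` of the target bin, and ``n_neighbor_freqs`` then uses
    # the remaining bins between 11 and 13 Hz - per side - as the noise
    # estimate; both depend on the current frequency resolution)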
bin_width = windowed_freqs[1] - windowed_freqs[0]
skip_neighbor_freqs = \
round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if (
bin_width < stim_bandwidth) else 0
n_neighbor_freqs = \
int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11)
) - 1 - 2 * skip_neighbor_freqs) / 2)
# compute snr
windowed_snrs = \
snr_spectrum(
windowed_psd,
noise_n_neighbor_freqs=n_neighbor_freqs if (
n_neighbor_freqs > 0
) else 1,
noise_skip_neighbor_freqs=skip_neighbor_freqs)
window_snrs[i_win] = \
windowed_snrs[
:, picks_roi_vis,
np.argmin(
abs(windowed_freqs - 12.))].mean(axis=1)
fig, ax = plt.subplots(1)
ax.boxplot(window_snrs, labels=window_lengths, vert=True)
ax.set(title='Effect of trial duration on 12 Hz SNR',
ylabel='Average SNR', xlabel='Trial duration [s]')
ax.axhline(1, ls='--', c='r')
fig.show()
##############################################################################
# You can see that the signal estimate / our SNR measure increases with the
# trial duration.
#
# This should be easy to understand: in longer recordings there is simply
# more signal (one second of additional stimulation adds, in our case, 12
# cycles of signal) while the noise is (hopefully) stochastic and not locked
# to the stimulation frequency.
# In other words: with more data the signal term grows faster than the noise
# term.
#
# We can further see that the very short trials with FFT windows < 2-3s are not
# great - here we've either hit the noise floor and/or the transient response
# at the trial onset covers too much of the trial.
#
# Again, this tutorial doesn't statistically test for the presence of a neural
# response, but an F-test or Hotelling T² would be appropriate for this
# purpose.
#
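# A minimal, self-contained illustration of that argument (purely synthetic
# and added for this write-up; it does not use the EEG data): a fixed-amplitude
# 12 Hz sinusoid in white noise, analysed with FFT windows of increasing
# length - the peak-to-noise ratio keeps growing.
sim_rng = np.random.RandomState(42)
sim_sfreq = 250.
for sim_dur in (2, 8, 20):
    sim_t = np.arange(0, sim_dur, 1. / sim_sfreq)
    sim_sig = np.sin(2 * np.pi * 12. * sim_t) + 2. * sim_rng.randn(sim_t.size)
    sim_spec = np.abs(np.fft.rfft(sim_sig)) ** 2
    sim_freqs = np.fft.rfftfreq(sim_t.size, 1. / sim_sfreq)
    sim_peak = sim_spec[np.argmin(abs(sim_freqs - 12.))]
    sim_noise = sim_spec[(sim_freqs > 14) & (sim_freqs < 20)].mean()
    print('%2i s window: peak-to-noise ratio ~ %.0f' % (sim_dur, sim_peak / sim_noise))
###############################################################################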
# Time resolved SNR
# ^^^^^^^^^^^^^^^^^
# ...and finally we can trick MNE's PSD implementation into a sliding-window
# analysis and come up with a time-resolved SNR measure.
# This will reveal whether a participant blinked or scratched their head...
#
# Each of the ten trials is drawn in a different shade of grey in the plot below.
#
# 4 s sliding window
window_length = 4
window_starts = [i for i in range(20 - window_length)]
window_snrs = [[]] * len(window_starts)
for i_win, win in enumerate(window_starts):
# compute spectrogram
windowed_psd, windowed_freqs = mne.time_frequency.psd_welch(
epochs[str(event_id['12hz'])],
n_fft=int(sfreq * window_length) - 1,
n_overlap=0, n_per_seg=None,
window='boxcar',
tmin=win, tmax=win + window_length,
fmin=fmin, fmax=fmax,
verbose=False)
# define a bandwidth of 1 Hz around stimfreq for SNR computation
bin_width = windowed_freqs[1] - windowed_freqs[0]
skip_neighbor_freqs = \
round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if (
bin_width < stim_bandwidth) else 0
n_neighbor_freqs = \
int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11)
) - 1 - 2 * skip_neighbor_freqs) / 2)
# compute snr
windowed_snrs = snr_spectrum(
windowed_psd,
noise_n_neighbor_freqs=n_neighbor_freqs if (
n_neighbor_freqs > 0) else 1,
noise_skip_neighbor_freqs=skip_neighbor_freqs)
window_snrs[i_win] = \
windowed_snrs[:, picks_roi_vis, np.argmin(
abs(windowed_freqs - 12.))].mean(axis=1)
fig, ax = plt.subplots(1)
colors = plt.get_cmap('Greys')(np.linspace(0, 1, 10))
for i in range(10):
ax.plot(window_starts, np.array(window_snrs)[:, i], color=colors[i])
ax.set(title='Time resolved 12 Hz SNR - %is sliding window' % window_length,
ylabel='Average SNR', xlabel='t0 of analysis window [s]')
ax.axhline(1, ls='--', c='r')
ax.legend(['individual trials in greyscale'])
fig.show()
##############################################################################
# Well.. turns out this was a bit too optimistic ;)
#
# But seriously: this was a nice idea, but we've reached the limit of
# what's possible with this single-subject example dataset.
# However, there might be data, applications, or research questions
# where such an analysis makes sense.
#
| bsd-3-clause |
tum-ens/urbs | doc/conf.py | 2 | 2044 | # -*- coding: utf-8 -*-
import sys, os
sys.path.append(os.path.abspath('..'))
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
]
#templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'urbs'
copyright = u'2014-2019, tum-ens'
version = '1.0.0'
release = '1.0.0'
exclude_patterns = ['_build']
#pygments_style = 'sphinx'
# HTML output
htmlhelp_basename = 'urbsdoc'
# LaTeX output
LATEX_PREAMBLE = """
\setcounter{tocdepth}{2}
"""
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
'preamble': LATEX_PREAMBLE
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'urbs.tex', u'urbs Documentation',
u'tum-ens', 'manual'),
]
# Manual page output
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'urbs', u'urbs Documentation',
[u'tum-ens'], 1)
]
# Texinfo output
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'urbs', u'urbs Documentation',
u'tum-ens', 'urbs', 'A linear optimisation model for distributed energy systems',
'Miscellaneous'),
]
# Epub output
# Bibliographic Dublin Core info.
epub_title = u'urbs'
epub_author = u'tum-ens'
epub_publisher = u'tum-ens'
epub_copyright = u'2014-2019, tum-ens'
epub_exclude_files = ['search.html']
# Intersphinx
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('http://matplotlib.org/', None)}
| gpl-3.0 |
xuewei4d/scikit-learn | sklearn/utils/_mocking.py | 11 | 10359 | import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from .validation import _num_samples, check_array, check_is_fitted
class ArraySlicingWrapper:
"""
Parameters
----------
array
"""
def __init__(self, array):
self.array = array
def __getitem__(self, aslice):
return MockDataFrame(self.array[aslice])
class MockDataFrame:
"""
Parameters
----------
array
"""
# have shape and length but don't support indexing.
def __init__(self, array):
self.array = array
self.values = array
self.shape = array.shape
self.ndim = array.ndim
# ugly hack to make iloc work.
self.iloc = ArraySlicingWrapper(array)
def __len__(self):
return len(self.array)
def __array__(self, dtype=None):
# Pandas data frames also are array-like: we want to make sure that
# input validation in cross-validation does not try to call that
# method.
return self.array
def __eq__(self, other):
return MockDataFrame(self.array == other.array)
def __ne__(self, other):
return not self == other
class CheckingClassifier(ClassifierMixin, BaseEstimator):
"""Dummy classifier to test pipelining and meta-estimators.
    Checks some property of `X` and `y` in fit / predict.
This allows testing whether pipelines / cross-validation or metaestimators
changed the input.
Can also be used to check if `fit_params` are passed correctly, and
to force a certain score to be returned.
Parameters
----------
check_y, check_X : callable, default=None
        The callable used to validate `X` and `y`. These callables should return
a bool where `False` will trigger an `AssertionError`.
check_y_params, check_X_params : dict, default=None
The optional parameters to pass to `check_X` and `check_y`.
methods_to_check : "all" or list of str, default="all"
The methods in which the checks should be applied. By default,
all checks will be done on all methods (`fit`, `predict`,
`predict_proba`, `decision_function` and `score`).
foo_param : int, default=0
A `foo` param. When `foo > 1`, the output of :meth:`score` will be 1
otherwise it is 0.
expected_fit_params : list of str, default=None
A list of the expected parameters given when calling `fit`.
Attributes
----------
classes_ : int
The classes seen during `fit`.
n_features_in_ : int
The number of features seen during `fit`.
Examples
--------
>>> from sklearn.utils._mocking import CheckingClassifier
    This helper allows asserting specific properties of `X` or `y`. In this
case we expect `check_X` or `check_y` to return a boolean.
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = CheckingClassifier(check_X=lambda x: x.shape == (150, 4))
>>> clf.fit(X, y)
CheckingClassifier(...)
We can also provide a check which might raise an error. In this case, we
expect `check_X` to return `X` and `check_y` to return `y`.
>>> from sklearn.utils import check_array
>>> clf = CheckingClassifier(check_X=check_array)
>>> clf.fit(X, y)
CheckingClassifier(...)
"""
def __init__(self, *, check_y=None, check_y_params=None,
check_X=None, check_X_params=None, methods_to_check="all",
foo_param=0, expected_fit_params=None):
self.check_y = check_y
self.check_y_params = check_y_params
self.check_X = check_X
self.check_X_params = check_X_params
self.methods_to_check = methods_to_check
self.foo_param = foo_param
self.expected_fit_params = expected_fit_params
def _check_X_y(self, X, y=None, should_be_fitted=True):
"""Validate X and y and make extra check.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data set.
y : array-like of shape (n_samples), default=None
The corresponding target, by default None.
should_be_fitted : bool, default=True
Whether or not the classifier should be already fitted.
By default True.
Returns
-------
X, y
"""
if should_be_fitted:
check_is_fitted(self)
if self.check_X is not None:
params = {} if self.check_X_params is None else self.check_X_params
checked_X = self.check_X(X, **params)
if isinstance(checked_X, (bool, np.bool_)):
assert checked_X
else:
X = checked_X
if y is not None and self.check_y is not None:
params = {} if self.check_y_params is None else self.check_y_params
checked_y = self.check_y(y, **params)
if isinstance(checked_y, (bool, np.bool_)):
assert checked_y
else:
y = checked_y
return X, y
def fit(self, X, y, **fit_params):
"""Fit classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
Returns
-------
self
"""
assert _num_samples(X) == _num_samples(y)
if self.methods_to_check == "all" or "fit" in self.methods_to_check:
X, y = self._check_X_y(X, y, should_be_fitted=False)
self.n_features_in_ = np.shape(X)[1]
self.classes_ = np.unique(
check_array(y, ensure_2d=False, allow_nd=True)
)
if self.expected_fit_params:
missing = set(self.expected_fit_params) - set(fit_params)
if missing:
raise AssertionError(
f'Expected fit parameter(s) {list(missing)} not seen.'
)
for key, value in fit_params.items():
if _num_samples(value) != _num_samples(X):
raise AssertionError(
f'Fit parameter {key} has length {_num_samples(value)}'
f'; expected {_num_samples(X)}.'
)
return self
def predict(self, X):
"""Predict the first class seen in `classes_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
preds : ndarray of shape (n_samples,)
            Predictions of the first class seen in `classes_`.
"""
if (self.methods_to_check == "all" or
"predict" in self.methods_to_check):
X, y = self._check_X_y(X)
return self.classes_[np.zeros(_num_samples(X), dtype=int)]
def predict_proba(self, X):
"""Predict probabilities for each class.
Here, the dummy classifier will provide a probability of 1 for the
first class of `classes_` and 0 otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
proba : ndarray of shape (n_samples, n_classes)
The probabilities for each sample and class.
"""
if (self.methods_to_check == "all" or
"predict_proba" in self.methods_to_check):
X, y = self._check_X_y(X)
proba = np.zeros((_num_samples(X), len(self.classes_)))
proba[:, 0] = 1
return proba
def decision_function(self, X):
"""Confidence score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
decision : ndarray of shape (n_samples,) if n_classes == 2\
else (n_samples, n_classes)
Confidence score.
"""
if (self.methods_to_check == "all" or
"decision_function" in self.methods_to_check):
X, y = self._check_X_y(X)
if len(self.classes_) == 2:
            # for a binary classifier, the confidence score is related to
# classes_[1] and therefore should be null.
return np.zeros(_num_samples(X))
else:
decision = np.zeros((_num_samples(X), len(self.classes_)))
decision[:, 0] = 1
return decision
def score(self, X=None, Y=None):
"""Fake score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
Y : array-like of shape (n_samples, n_output) or (n_samples,)
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
            Either 0 or 1 depending on `foo_param` (i.e. `foo_param > 1 =>
score=1` otherwise `score=0`).
"""
if self.methods_to_check == "all" or "score" in self.methods_to_check:
self._check_X_y(X, Y)
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def _more_tags(self):
return {'_skip_test': True, 'X_types': ['1dlabel']}
class NoSampleWeightWrapper(BaseEstimator):
"""Wrap estimator which will not expose `sample_weight`.
Parameters
----------
est : estimator, default=None
The estimator to wrap.
"""
def __init__(self, est=None):
self.est = est
def fit(self, X, y):
return self.est.fit(X, y)
def predict(self, X):
return self.est.predict(X)
def predict_proba(self, X):
return self.est.predict_proba(X)
def _more_tags(self):
return {'_skip_test': True}
| bsd-3-clause |
CharLLCH/Word2Vec | pycode/Word2Vec_BagOfCentroids.py | 1 | 5355 | #!/usr/bin/env python
# Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Part 2 of the tutorial and covers Bag of Centroids
# for a Word2Vec model. This code assumes that you have already
# run Word2Vec and saved a model called "300features_40minwords_10context"
#
# *************************************** #
# Load a pre-trained model
from gensim.models import Word2Vec
from sklearn.cluster import KMeans
import time
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import numpy as np
import os
from KaggleWord2VecUtility import KaggleWord2VecUtility
# Define a function to create bags of centroids
#
def create_bag_of_centroids( wordlist, word_centroid_map ):
#
# The number of clusters is equal to the highest cluster index
# in the word / centroid map
num_centroids = max( word_centroid_map.values() ) + 1
#
# Pre-allocate the bag of centroids vector (for speed)
bag_of_centroids = np.zeros( num_centroids, dtype="float32" )
#
# Loop over the words in the review. If the word is in the vocabulary,
# find which cluster it belongs to, and increment that cluster count
# by one
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
#
# Return the "bag of centroids"
return bag_of_centroids
if __name__ == '__main__':
model = Word2Vec.load("../result/300features_40minwords_10context")
# ****** Run k-means on the word vectors and print a few clusters
#
start = time.time() # Start time
# Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
# average of 5 words per cluster
word_vectors = model.syn0
num_clusters = word_vectors.shape[0] / 5
    # Initialize a k-means object and use it to extract centroids
print "Running K means"
kmeans_clustering = KMeans( n_clusters = num_clusters )
idx = kmeans_clustering.fit_predict( word_vectors )
# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print "Time taken for K Means clustering: ", elapsed, "seconds."
# Create a Word / Index dictionary, mapping each vocabulary word to
# a cluster number
word_centroid_map = dict(zip( model.index2word, idx ))
# Print the first ten clusters
for cluster in xrange(0,10):
#
# Print the cluster number
print "\nCluster %d" % cluster
#
# Find all of the words for that cluster number, and print them out
words = []
for i in xrange(0,len(word_centroid_map.values())):
if( word_centroid_map.values()[i] == cluster ):
words.append(word_centroid_map.keys()[i])
print words
# Create clean_train_reviews and clean_test_reviews as we did before
#
# Read data from files
#train = pd.read_csv( os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, delimiter="\t", quoting=3 )
#test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", quoting=3 )
train = pd.read_csv("/home/charch/gitwork/Word2Vec/data/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3 )
test = pd.read_csv("/home/charch/gitwork/Word2Vec/data/testData.tsv", header=0, delimiter="\t", quoting=3)
print "Cleaning training reviews"
clean_train_reviews = []
for review in train["review"]:
clean_train_reviews.append( KaggleWord2VecUtility.review_to_wordlist( review, \
remove_stopwords=True ))
print "Cleaning test reviews"
clean_test_reviews = []
for review in test["review"]:
clean_test_reviews.append( KaggleWord2VecUtility.review_to_wordlist( review, \
remove_stopwords=True ))
# ****** Create bags of centroids
#
# Pre-allocate an array for the training set bags of centroids (for speed)
train_centroids = np.zeros( (train["review"].size, num_clusters), \
dtype="float32" )
# Transform the training set reviews into bags of centroids
counter = 0
for review in clean_train_reviews:
train_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# Repeat for test reviews
test_centroids = np.zeros(( test["review"].size, num_clusters), \
dtype="float32" )
counter = 0
for review in clean_test_reviews:
test_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# ****** Fit a random forest and extract predictions
#
forest = RandomForestClassifier(n_estimators = 100)
# Fitting the forest may take a few minutes
print "Fitting a random forest to labeled training data..."
forest = forest.fit(train_centroids,train["sentiment"])
result = forest.predict(test_centroids)
# Write the test results
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
output.to_csv("../result/BagOfCentroids.csv", index=False, quoting=3)
print "Wrote BagOfCentroids.csv"
| gpl-2.0 |
loli/semisupervisedforests | sklearn/tests/test_hmm.py | 31 | 28118 | from __future__ import print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
        # use init_function to initialize parameters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
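        # Add random mass to the diagonal (then row-normalize) so the initial
        # transition matrix favors self-transitions before EM re-estimates it.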
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
| bsd-3-clause |
AlexRobson/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
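# shrink_threshold=None gives plain nearest-centroid classification; a positive
# value applies the "nearest shrunken centroid" rule, shrinking each class
# centroid toward the overall centroid and discarding features whose shrunken
# component falls to zero, which can suppress noisy features.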
for shrinkage in [None, 0.1]:
    # we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
fy2462/apollo | modules/tools/mapshow/map.py | 1 | 7337 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import random
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import common.proto_utils as proto_utils
from modules.map.proto import map_pb2
class Map:
def __init__(self):
self.map_pb = map_pb2.Map()
self.colors = []
self.init_colors()
def init_colors(self):
color_num = 6
self.colors = []
values = range(color_num)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def load(self, map_file_name):
res = proto_utils.get_pb_from_file(map_file_name, self.map_pb)
        return res is not None
def draw_roads(self, ax):
cnt = 1
for road in self.map_pb.road:
color_val = self.colors[cnt % len(self.colors)]
self.draw_road(ax, road, color_val)
cnt += 1
def draw_road(self, ax, road, color_val):
for section in road.section:
for edge in section.boundary.outer_polygon.edge:
for segment in edge.curve.segment:
if segment.HasField('line_segment'):
px = []
py = []
for p in segment.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha = 0.5)
def draw_lanes(self, ax, is_show_lane_ids, laneids):
cnt = 1
for lane in self.map_pb.lane:
color_val = self.colors[cnt % len(self.colors)]
if len(laneids) == 0:
self._draw_lane_boundary(lane, ax, color_val)
self._draw_lane_central(lane, ax, color_val)
else:
if lane.id.id in laneids:
self._draw_lane_boundary(lane, ax, color_val)
self._draw_lane_central(lane, ax, color_val)
if is_show_lane_ids:
self._draw_lane_id(lane, ax, color_val)
elif lane.id.id in laneids:
self._draw_lane_id(lane, ax, color_val)
cnt += 1
def _draw_lane_id(self, lane, ax, color_val):
"""draw lane id"""
labelxys = []
labelxys.append((40, -40))
labelxys.append((-40, -40))
labelxys.append((40, 40))
labelxys.append((-40, 40))
has = ['right', 'left', 'right', 'left']
vas = ['bottom', 'bottom', 'top', 'top']
idx = random.randint(0, 3)
lxy = labelxys[idx]
x, y = self._find_lane_central_point(lane)
plt.annotate(
lane.id.id,
xy=(x, y), xytext=lxy,
textcoords='offset points', ha=has[idx], va=vas[idx],
bbox= dict(boxstyle = 'round,pad=0.5', fc=color_val, alpha=0.5),
arrowprops = dict(arrowstyle = '-|>', connectionstyle = 'arc3,rad=-0.2', fc=color_val, ec=color_val, alpha=0.5))
@staticmethod
def _find_lane_central_point(lane):
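        # Take the median segment/point of the left and right boundaries and
        # average them to approximate a point near the middle of the lane.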
        segment_idx = len(lane.left_boundary.curve.segment) // 2
        median_segment = lane.left_boundary.curve.segment[segment_idx]
        left_point_idx = len(median_segment.line_segment.point) // 2
        left_median_point = median_segment.line_segment.point[left_point_idx]
        segment_idx = len(lane.right_boundary.curve.segment) // 2
        median_segment = lane.right_boundary.curve.segment[segment_idx]
        right_point_idx = len(median_segment.line_segment.point) // 2
right_median_point = median_segment.line_segment.point[right_point_idx]
x = (left_median_point.x + right_median_point.x) / 2
y = (left_median_point.y + right_median_point.y) / 2
return x, y
@staticmethod
def _get_median_point(points):
"""get_median_point"""
if len(points) % 2 == 1:
point = points[len(points) / 2]
return point.x, point.y
else:
point1 = points[len(points) / 2 - 1]
point2 = points[len(points) / 2]
return (point1.x + point2.x) / 2.0, (point1.y + point2.y) / 2.0
@staticmethod
def _draw_lane_boundary(lane, ax, color_val):
"""draw boundary"""
for curve in lane.left_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha = 0.5)
for curve in lane.right_boundary.curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls='-', c=color_val, alpha = 0.5)
@staticmethod
def _draw_lane_central(lane, ax, color_val):
"""draw boundary"""
for curve in lane.central_curve.segment:
if curve.HasField('line_segment'):
px = []
py = []
for p in curve.line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, ls=':', c=color_val, alpha = 0.5)
def draw_signal_lights(self, ax):
"""draw_signal_lights"""
for signal in self.map_pb.signal:
for stop_line in signal.stop_line:
for curve in stop_line.segment:
self._draw_signal(curve.line_segment, signal.id.id, ax)
@staticmethod
def _draw_signal(line_segment, label, ax):
"""draw a signal"""
px = []
py = []
for p in line_segment.point:
px.append(float(p.x))
py.append(float(p.y))
ax.plot(px, py, 'o-')
lxy = [random.randint(20, 80) * random.sample([-1, 1], 1)[0],
random.randint(20, 80) * random.sample([-1, 1], 1)[0]]
xy = (sum(px)/len(px), sum(py)/len(py))
plt.annotate(
label,
xy = xy, xytext = lxy,
textcoords = 'offset points',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')) | apache-2.0 |
czhengsci/veidt | veidt/model/tests/test_models.py | 2 | 5371 | # coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.abstract import Describer
from veidt.describer.structural_describer import DistinctSiteProperty
from veidt.model.neural_network import MultiLayerPerceptron
from veidt.model.linear_model import LinearModel
from veidt.model.gaussian_process import GaussianProcessRegressionModel
import shutil
import tempfile
class NeuralNetTest(unittest.TestCase):
def setUp(self):
self.nn = MultiLayerPerceptron(
[25, 5], describer=DistinctSiteProperty(['8c'], ["Z"]))
self.nn2 = MultiLayerPerceptron(
[25, 5], describer=DistinctSiteProperty(['8c'], ["Z"]))
self.li2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Li2O.cif"))
self.na2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Na2O.cif"))
self.structures = [self.li2o] * 100 + [self.na2o] * 100
self.energies = np.array([3] * 100 + [4] * 100)
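        # Constant target value per structure (3 for Li2O, 4 for Na2O), so the
        # describer + network only has to learn to tell the two structures apart.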
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test
shutil.rmtree(self.test_dir)
def test_fit_evaluate(self):
self.nn.fit(inputs=self.structures, outputs=self.energies, epochs=100)
# Given this is a fairly simple model, we should get close to exact.
#self.assertEqual(round(self.nn.predict([self.na2o])[0][0]), 4, 3)
self.assertTrue(3 <= round(self.nn.predict([self.na2o])[0][0]) <= 4)
def test_model_save_load(self):
model_fname = os.path.join(self.test_dir, 'test_nnmodel.h5')
scaler_fname = os.path.join(self.test_dir, 'test_nnscaler.save')
self.nn.fit(inputs=self.structures, outputs=self.energies, epochs=100)
self.nn.save(model_fname=model_fname, scaler_fname=scaler_fname)
self.nn2.load(model_fname=model_fname, scaler_fname=scaler_fname)
self.assertEqual(self.nn.predict([self.na2o])[0][0],
self.nn2.predict([self.na2o])[0][0])
class LinearModelTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.x_train = np.random.rand(10, 2)
cls.coef = np.random.rand(2)
cls.intercept = np.random.rand()
cls.y_train = cls.x_train.dot(cls.coef) + cls.intercept
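        # Exact linear targets: y = X.dot(coef) + intercept, so a fitted
        # LinearModel should recover coef and intercept almost exactly.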
def setUp(self):
class DummyDescriber(Describer):
def describe(self, obj):
pass
def describe_all(self, n):
return pd.DataFrame(n)
self.lm = LinearModel(DummyDescriber())
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
# Remove the directory after the test
shutil.rmtree(self.test_dir)
def test_fit_predict(self):
self.lm.fit(inputs=self.x_train, outputs=self.y_train)
x_test = np.random.rand(10, 2)
y_test = x_test.dot(self.coef) + self.intercept
y_pred = self.lm.predict(x_test)
np.testing.assert_array_almost_equal(y_test, y_pred)
np.testing.assert_array_almost_equal(self.coef, self.lm.coef)
self.assertAlmostEqual(self.intercept, self.lm.intercept)
def test_evaluate_fit(self):
self.lm.fit(inputs=self.x_train, outputs=self.y_train)
y_pred = self.lm.evaluate_fit()
np.testing.assert_array_almost_equal(y_pred, self.y_train)
def test_serialize(self):
json_str = json.dumps(self.lm.as_dict())
recover = LinearModel.from_dict(json.loads(json_str))
self.assertIsNotNone(recover)
def model_save_load(self):
self.lm.save(os.path.join(self.test_dir, 'test_lm.save'))
ori = self.lm.model.coef_
self.lm.load(os.path.join(self.test_dir, 'test_lm.save'))
loaded = self.lm.model.coef_
self.assertAlmostEqual(ori, loaded)
class GaussianProcessTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.this_dir = os.path.dirname(os.path.abspath(__file__))
cls.test_dir = tempfile.mkdtemp()
def setUp(self):
self.x_train = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
self.y_train = (self.x_train * np.sin(self.x_train)).ravel()
class DummyDescriber(Describer):
def describe(self, obj):
pass
def describe_all(self, n):
return pd.DataFrame(n)
self.gpr = GaussianProcessRegressionModel(describer=DummyDescriber(), \
kernel_category='RBF')
@classmethod
def tearDownClass(cls):
os.chdir(cls.this_dir)
shutil.rmtree(cls.test_dir)
def test_fit_predict(self):
self.gpr.fit(inputs=self.x_train, outputs=self.y_train)
x_test = np.atleast_2d(np.linspace(0, 9, 1000)).T
y_test = x_test * np.sin(x_test)
y_pred, sigma = self.gpr.predict(x_test, return_std=True)
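        # 1.96 * sigma is the ~95% confidence band of the GP posterior; the true
        # function should lie inside it over the whole test grid.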
upper_bound = y_pred + 1.96 * sigma
lower_bound = y_pred - 1.96 * sigma
self.assertTrue(np.all([l < y and y < u for u, y, l in\
zip(upper_bound, y_test, lower_bound)]))
if __name__ == "__main__":
unittest.main() | bsd-3-clause |
bhargav/scikit-learn | sklearn/utils/tests/test_multiclass.py | 34 | 13405 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
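    # Column 2 of y_sp holds only implicit zeros (indptr stays at 11), while
    # column 1 stores one explicit zero, so both kinds of zero are exercised.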
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
jinglining/flink | flink-python/pyflink/table/tests/test_table_environment_api.py | 1 | 30955 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import pathlib
import sys
from py4j.protocol import Py4JJavaError
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes, CsvTableSink, StreamTableEnvironment, EnvironmentSettings, \
Module, ResultKind
from pyflink.table.descriptors import FileSystem, OldCsv, Schema
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.table_config import TableConfig
from pyflink.table.table_environment import BatchTableEnvironment
from pyflink.table.types import RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase, \
PyFlinkBlinkBatchTableTestCase
from pyflink.util.utils import get_j_env_configuration
class TableEnvironmentTest(object):
def test_set_sys_executable_for_local_mode(self):
jvm = get_gateway().jvm
actual_executable = get_j_env_configuration(self.t_env) \
.getString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
self.assertEqual(sys.executable, actual_executable)
def test_explain(self):
schema = RowType()\
.add('a', DataTypes.INT())\
.add('b', DataTypes.STRING())\
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = result.explain()
assert isinstance(actual, str)
def test_explain_with_extended(self):
schema = RowType() \
.add('a', DataTypes.INT()) \
.add('b', DataTypes.STRING()) \
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = result.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE)
assert isinstance(actual, str)
def test_register_java_function(self):
t_env = self.t_env
t_env.register_java_function("scalar_func",
"org.apache.flink.table.expressions.utils.RichFunc0")
t_env.register_java_function(
"agg_func", "org.apache.flink.table.functions.aggfunctions.ByteMaxAggFunction")
t_env.register_java_function("table_func", "org.apache.flink.table.utils.TableFunc1")
actual = t_env.list_user_defined_functions()
expected = ['scalar_func', 'agg_func', 'table_func']
self.assert_equals(actual, expected)
def test_unload_and_load_module(self):
t_env = self.t_env
t_env.unload_module('core')
t_env.load_module('core', Module(
get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
def test_create_and_drop_java_function(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"scalar_func", "org.apache.flink.table.expressions.utils.RichFunc0")
t_env.create_java_function(
"agg_func", "org.apache.flink.table.functions.aggfunctions.ByteMaxAggFunction")
t_env.create_java_temporary_function(
"table_func", "org.apache.flink.table.utils.TableFunc1")
self.assert_equals(t_env.list_user_defined_functions(),
['scalar_func', 'agg_func', 'table_func'])
t_env.drop_temporary_system_function("scalar_func")
t_env.drop_function("agg_func")
t_env.drop_temporary_function("table_func")
self.assert_equals(t_env.list_user_defined_functions(), [])
class StreamTableEnvironmentTests(TableEnvironmentTest, PyFlinkStreamTableTestCase):
def test_register_table_source_scan(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
t_env.register_table_source("Source", csv_source)
result = t_env.scan("Source")
self.assertEqual(
'CatalogTable: (identifier: [`default_catalog`.`default_database`.`Source`]'
', fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_register_table_sink(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_from_table_source(self):
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
result = self.t_env.from_table_source(csv_source)
self.assertEqual(
'TableSource: (fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_list_tables(self):
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"Results",
source_sink_utils.TestAppendSink(field_names, field_types))
actual = t_env.list_tables()
expected = ['Orders', 'Results', 'Sinks']
self.assert_equals(actual, expected)
def test_temporary_tables(self):
t_env = self.t_env
t_env.connect(FileSystem().path(os.path.join(self.tempdir + '/temp_1.csv'))) \
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.create_temporary_table("temporary_table_1")
t_env.connect(FileSystem().path(os.path.join(self.tempdir + '/temp_2.csv'))) \
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.create_temporary_table("temporary_table_2")
actual = t_env.list_temporary_tables()
expected = ['temporary_table_1', 'temporary_table_2']
self.assert_equals(actual, expected)
t_env.drop_temporary_table("temporary_table_1")
actual = t_env.list_temporary_tables()
expected = ['temporary_table_2']
self.assert_equals(actual, expected)
def test_temporary_views(self):
t_env = self.t_env
t_env.create_temporary_view(
"temporary_view_1",
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']))
t_env.create_temporary_view(
"temporary_view_2",
t_env.from_elements([(1, 'Hi')], ['a', 'b']))
actual = t_env.list_temporary_views()
expected = ['temporary_view_1', 'temporary_view_2']
self.assert_equals(actual, expected)
t_env.drop_temporary_view("temporary_view_1")
actual = t_env.list_temporary_views()
expected = ['temporary_view_2']
self.assert_equals(actual, expected)
def test_from_path(self):
t_env = self.t_env
t_env.create_temporary_view(
"temporary_view_1",
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']))
result = t_env.from_path("temporary_view_1")
self.assertEqual(
'CatalogTable: (identifier: [`default_catalog`.`default_database`.`temporary_view_1`]'
', fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_insert_into(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.insert_into("Sinks", t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]))
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_statement_set(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"sink2",
source_sink_utils.TestAppendSink(field_names, field_types))
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)\
.add_insert("sink2", source.filter("a < 100"), False)
actual = stmt_set.explain(ExplainDetail.CHANGELOG_MODE)
assert isinstance(actual, str)
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"sink2",
source_sink_utils.TestAppendSink(field_names, field_types))
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE)
assert isinstance(actual, str)
def test_explain_sql_without_explain_detail(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
result = t_env.explain_sql("select a + 1, b, c from %s" % source)
assert isinstance(result, str)
def test_explain_sql_with_explain_detail(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
result = t_env.explain_sql(
"select a + 1, b, c from %s" % source, ExplainDetail.CHANGELOG_MODE)
assert isinstance(result, str)
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_local_timezone("Asia/Shanghai")
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(env, table_config)
readed_table_config = t_env.get_config()
self.assertFalse(readed_table_config.get_null_check())
self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(readed_table_config.get_local_timezone(), "Asia/Shanghai")
def test_create_table_environment_with_blink_planner(self):
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.new_instance().use_blink_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.StreamPlanner")
t_env = StreamTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.StreamPlanner")
t_env = StreamTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().use_old_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.StreamPlanner")
def test_table_environment_with_blink_planner(self):
self.env.set_parallelism(1)
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.new_instance().use_blink_planner().build())
source_path = os.path.join(self.tempdir + '/streaming.csv')
sink_path = os.path.join(self.tempdir + '/result.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = [(1, 'hi', 'hello'), (2, 'hello', 'hello')]
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env.register_table_source("source", csv_source)
t_env.register_table_sink(
"sink",
CsvTableSink(field_names, field_types, sink_path))
source = t_env.scan("source")
result = source.alias("a, b, c").select("1 + a, b, c")
result.insert_into("sink")
t_env.execute("blink_test")
results = []
with open(sink_path, 'r') as f:
results.append(f.readline())
results.append(f.readline())
self.assert_equals(results, ['2,hi,hello\n', '3,hello,hello\n'])
def test_set_jars(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_t_env)
def test_set_jars_with_execute_sql(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_execute_sql)
def test_set_jars_with_statement_set(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_statement_set)
def test_set_jars_with_table(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_table)
def test_set_jars_with_table_execute_insert(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_table_execute_insert)
def test_set_jars_with_table_to_pandas(self):
self.verify_set_java_dependencies("pipeline.jars", self.execute_with_table_to_pandas)
def test_set_classpaths(self):
self.verify_set_java_dependencies("pipeline.classpaths", self.execute_with_t_env)
def test_set_classpaths_with_execute_sql(self):
self.verify_set_java_dependencies("pipeline.classpaths", self.execute_with_execute_sql)
def test_set_classpaths_with_statement_set(self):
self.verify_set_java_dependencies("pipeline.classpaths", self.execute_with_statement_set)
def test_set_classpaths_with_table(self):
self.verify_set_java_dependencies("pipeline.classpaths", self.execute_with_table)
def test_set_classpaths_with_table_execute_insert(self):
self.verify_set_java_dependencies(
"pipeline.classpaths", self.execute_with_table_execute_insert)
def test_set_classpaths_with_table_to_pandas(self):
self.verify_set_java_dependencies("pipeline.classpaths", self.execute_with_table_to_pandas)
def execute_with_t_env(self, t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
source.select("func1(a, b), func2(a, b)").insert_into("sink")
t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1 and Hi,1 or Hi', '2 and Hello,2 or Hello']
self.assert_equals(actual, expected)
@staticmethod
def execute_with_execute_sql(t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
t_env.create_temporary_view("source", source)
t_env.execute_sql("select func1(a, b), func2(a, b) from source") \
.get_job_client() \
.get_job_execution_result() \
.result()
def execute_with_statement_set(self, t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
result = source.select("func1(a, b), func2(a, b)")
t_env.create_statement_set().add_insert("sink", result).execute() \
.get_job_client() \
.get_job_execution_result() \
.result()
actual = source_sink_utils.results()
expected = ['1 and Hi,1 or Hi', '2 and Hello,2 or Hello']
self.assert_equals(actual, expected)
@staticmethod
def execute_with_table(t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
result = source.select("func1(a, b), func2(a, b)")
result.execute() \
.get_job_client() \
.get_job_execution_result() \
.result()
def execute_with_table_execute_insert(self, t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
result = source.select("func1(a, b), func2(a, b)")
result.execute_insert("sink") \
.get_job_client() \
.get_job_execution_result() \
.result()
actual = source_sink_utils.results()
expected = ['1 and Hi,1 or Hi', '2 and Hello,2 or Hello']
self.assert_equals(actual, expected)
@staticmethod
def execute_with_table_to_pandas(t_env):
source = t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
result = source.select("func1(a, b), func2(a, b)")
result.to_pandas()
def verify_set_java_dependencies(self, config_key, executor):
original_class_loader = \
get_gateway().jvm.Thread.currentThread().getContextClassLoader()
try:
jar_urls = []
func1_class_name = "org.apache.flink.python.util.TestScalarFunction1"
func2_class_name = "org.apache.flink.python.util.TestScalarFunction2"
func1_jar_pattern = "flink-python/target/func1/flink-python*-tests.jar"
func2_jar_pattern = "flink-python/target/func2/flink-python*-tests.jar"
self.ensure_jar_not_loaded(func1_class_name, func1_jar_pattern)
self.ensure_jar_not_loaded(func2_class_name, func2_jar_pattern)
jar_urls.extend(self.get_jar_url(func1_jar_pattern))
jar_urls.extend(self.get_jar_url(func2_jar_pattern))
# test set the "pipeline.jars" multiple times
self.t_env.get_config().get_configuration().set_string(config_key, ";".join(jar_urls))
first_class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
self.t_env.get_config().get_configuration().set_string(config_key, jar_urls[0])
self.t_env.get_config().get_configuration().set_string(config_key, ";".join(jar_urls))
second_class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
self.assertEqual(first_class_loader, second_class_loader)
self.t_env.register_java_function("func1", func1_class_name)
self.t_env.register_java_function("func2", func2_class_name)
table_sink = source_sink_utils.TestAppendSink(
["a", "b"], [DataTypes.STRING(), DataTypes.STRING()])
self.t_env.register_table_sink("sink", table_sink)
executor(self.t_env)
finally:
get_gateway().jvm.Thread.currentThread().setContextClassLoader(original_class_loader)
def ensure_jar_not_loaded(self, func_class_name, jar_filename_pattern):
test_jars = glob.glob(os.path.join(_find_flink_source_root(), jar_filename_pattern))
if not test_jars:
self.fail("'%s' is not available. Please compile the test jars first."
% jar_filename_pattern)
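        # Registering the function must fail here: its jar is deliberately not on
        # the classpath yet. A successful registration means the gateway process
        # is already polluted and the dependency test below would be meaningless.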
try:
self.t_env.register_java_function("func", func_class_name)
except Py4JJavaError:
pass
else:
self.fail("The scalar function '%s' should not be able to be loaded. Please remove "
"the '%s' from the classpath of the PythonGatewayServer process." %
(func_class_name, jar_filename_pattern))
@staticmethod
def get_jar_url(jar_filename_pattern):
test_jars = glob.glob(os.path.join(_find_flink_source_root(), jar_filename_pattern))
return [pathlib.Path(jar_path).as_uri() for jar_path in test_jars]
class BatchTableEnvironmentTests(TableEnvironmentTest, PyFlinkBatchTableTestCase):
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
CsvTableSink(field_names, field_types, "path1"))
t_env.register_table_sink(
"sink2",
CsvTableSink(field_names, field_types, "path2"))
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE)
assert isinstance(actual, str)
def test_statement_set(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
CsvTableSink(field_names, field_types, "path1"))
t_env.register_table_sink(
"sink2",
CsvTableSink(field_names, field_types, "path2"))
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)\
.add_insert("sink2", source.filter("a < 100"))
actual = stmt_set.explain()
assert isinstance(actual, str)
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_local_timezone("Asia/Shanghai")
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, table_config)
readed_table_config = t_env.get_config()
self.assertFalse(readed_table_config.get_null_check())
self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(readed_table_config.get_local_timezone(), "Asia/Shanghai")
def test_create_table_environment_with_old_planner(self):
t_env = BatchTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().in_batch_mode()
.use_old_planner().build())
self.assertEqual(
t_env._j_tenv.getClass().getName(),
"org.apache.flink.table.api.bridge.java.internal.BatchTableEnvironmentImpl")
def test_create_table_environment_with_blink_planner(self):
t_env = BatchTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().in_batch_mode()
.use_blink_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.BatchPlanner")
def test_table_environment_with_blink_planner(self):
t_env = BatchTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().in_batch_mode()
.use_blink_planner().build())
source_path = os.path.join(self.tempdir + '/streaming.csv')
sink_path = os.path.join(self.tempdir + '/results')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = [(1, 'hi', 'hello'), (2, 'hello', 'hello')]
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env.register_table_source("source", csv_source)
t_env.register_table_sink(
"sink",
CsvTableSink(field_names, field_types, sink_path))
source = t_env.scan("source")
result = source.alias("a, b, c").select("1 + a, b, c")
result.insert_into("sink")
t_env.execute("blink_test")
results = []
for root, dirs, files in os.walk(sink_path):
for sub_file in files:
with open(os.path.join(root, sub_file), 'r') as f:
line = f.readline()
while line is not None and line != '':
results.append(line)
line = f.readline()
self.assert_equals(results, ['2,hi,hello\n', '3,hello,hello\n'])
class BlinkBatchTableEnvironmentTests(PyFlinkBlinkBatchTableTestCase):
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
CsvTableSink(field_names, field_types, "path1"))
t_env.register_table_sink(
"sink2",
CsvTableSink(field_names, field_types, "path2"))
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE)
self.assertIsInstance(actual, str)
def test_register_java_function(self):
t_env = self.t_env
t_env.register_java_function(
"scalar_func", "org.apache.flink.table.expressions.utils.RichFunc0")
t_env.register_java_function(
"agg_func", "org.apache.flink.table.functions.aggfunctions.ByteMaxAggFunction")
t_env.register_java_function(
"table_func", "org.apache.flink.table.utils.TableFunc2")
actual = t_env.list_user_defined_functions()
expected = ['scalar_func', 'agg_func', 'table_func']
self.assert_equals(actual, expected)
def test_unload_and_load_module(self):
t_env = self.t_env
t_env.unload_module('core')
t_env.load_module('core', Module(
get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
def test_create_and_drop_java_function(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"scalar_func", "org.apache.flink.table.expressions.utils.RichFunc0")
t_env.create_java_function(
"agg_func", "org.apache.flink.table.functions.aggfunctions.ByteMaxAggFunction")
t_env.create_java_temporary_function(
"table_func", "org.apache.flink.table.utils.TableFunc1")
self.assert_equals(t_env.list_user_defined_functions(),
['scalar_func', 'agg_func', 'table_func'])
t_env.drop_temporary_system_function("scalar_func")
t_env.drop_function("agg_func")
t_env.drop_temporary_function("table_func")
self.assert_equals(t_env.list_user_defined_functions(), [])
| apache-2.0 |
Kongsea/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 9 | 18785 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
tfd = tf.contrib.distributions
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
components=[
tfd.Normal(loc=-1., scale=0.1),
tfd.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
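
  A further minimal sketch (not part of the original example; assumes the same
  graph/session-style usage as above): draw samples and score them.

  ```python
  samples = bimix_gauss.sample(5)        # shape [5]
  log_p = bimix_gauss.log_prob(samples)  # shape [5]
  ```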
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
        of `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]):
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
    # We let the Mixture distribution access _graph_parents since it's arguably
    # more like a baseclass.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
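# --- Hedged usage sketch (an added illustration, not part of the original
# TensorFlow module). It assumes the TF 1.x `tf.contrib.distributions` API is
# importable as below; the mixture weights and Normal parameters are arbitrary.
if __name__ == "__main__":
  import tensorflow as tf
  ds = tf.contrib.distributions
  # A scalar mixture of two Gaussians: `cat` carries the mixing weights, and the
  # two Normal components share dtype, batch shape, and (scalar) event shape.
  gm = ds.Mixture(
      cat=ds.Categorical(probs=[0.3, 0.7]),
      components=[ds.Normal(loc=-1.0, scale=0.1),
                  ds.Normal(loc=1.0, scale=0.5)])
  with tf.Session() as sess:
    print(sess.run([gm.mean(), gm.stddev(), gm.log_prob(0.5)]))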
| apache-2.0 |
loli/semisupervisedforests | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
    # Not a square similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
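# --- Hedged usage sketch (an added illustration, not part of the scikit-learn
# test suite): embed the same 4x4 precomputed dissimilarity matrix in 2-D with
# metric MDS and print the coordinates; parameter values are arbitrary.
if __name__ == "__main__":
    sim = np.array([[0, 5, 3, 4],
                    [5, 0, 2, 2],
                    [3, 2, 0, 1],
                    [4, 2, 1, 0]])
    coords = mds.MDS(n_components=2, dissimilarity="precomputed",
                     random_state=42).fit_transform(sim)
    print(coords)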
| bsd-3-clause |
madmouser1/aubio | python/demos/demo_mel-energy.py | 9 | 2203 | #! /usr/bin/env python
import sys
from aubio import fvec, source, pvoc, filterbank
from numpy import vstack, zeros
win_s = 512 # fft size
hop_s = win_s / 4 # hop size
if len(sys.argv) < 2:
print "Usage: %s <filename> [samplerate]" % sys.argv[0]
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
pv = pvoc(win_s, hop_s)
f = filterbank(40, win_s)
f.set_mel_coeffs_slaney(samplerate)
energies = zeros((40,))
o = {}
total_frames = 0
downsample = 2
while True:
samples, read = s()
fftgrain = pv(samples)
new_energies = f(fftgrain)
print '%f' % (total_frames / float(samplerate) ),
print ' '.join(['%f' % b for b in new_energies])
energies = vstack( [energies, new_energies] )
total_frames += read
if read < hop_s: break
if 1:
print "done computing, now plotting"
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot(filename, samplerate, block_size = hop_s, ax = wave )
wave.yaxis.set_visible(False)
wave.xaxis.set_visible(False)
n_plots = len(energies.T)
all_desc_times = [ x * hop_s for x in range(len(energies)) ]
for i, band in enumerate(energies.T):
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_plots), 0.8, 0.65 / n_plots], sharex = wave )
ax.plot(all_desc_times, band, '-', label = 'band %d' % i)
#ax.set_ylabel(method, rotation = 0)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.axis(xmax = all_desc_times[-1], xmin = all_desc_times[0])
ax.annotate('band %d' % i, xy=(-10, 0), xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
size = 'xx-small',
)
set_xlabels_sample2time( ax, all_desc_times[-1], samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
plt.show()
| gpl-3.0 |
erichseamon/sklearn_pycon2015 | notebooks/fig_code/figures.py | 34 | 8633 | import numpy as np
import matplotlib.pyplot as plt
import warnings
def plot_venn_diagram():
fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[]))
ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5))
ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5))
ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black'))
ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center')
ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center')
ax.text(0.0, 0.6, '$I$', size=30)
ax.axis('equal')
def plot_example_decision_tree():
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
ax.set_title('Example Decision Tree: Animal Classification', size=24)
def text(ax, x, y, t, size=20, **kwargs):
ax.text(x, y, t,
ha='center', va='center', size=size,
bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
ax.axis([0, 1, 0, 1])
def visualize_tree(estimator, X, y, boundaries=True,
xlim=None, ylim=None):
estimator.fit(X, y)
if xlim is None:
xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1)
if ylim is None:
ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1)
x_min, x_max = xlim
y_min, y_max = ylim
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow')
plt.clim(y.min(), y.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')
plt.axis('off')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.clim(y.min(), y.max())
# Plot the decision boundaries
def plot_boundaries(i, xlim, ylim):
if i < 0:
return
tree = estimator.tree_
if tree.feature[i] == 0:
plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k')
plot_boundaries(tree.children_left[i],
[xlim[0], tree.threshold[i]], ylim)
plot_boundaries(tree.children_right[i],
[tree.threshold[i], xlim[1]], ylim)
elif tree.feature[i] == 1:
plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k')
plot_boundaries(tree.children_left[i], xlim,
[ylim[0], tree.threshold[i]])
plot_boundaries(tree.children_right[i], xlim,
[tree.threshold[i], ylim[1]])
if boundaries:
plot_boundaries(0, plt.xlim(), plt.ylim())
def plot_tree_interactive(X, y):
from sklearn.tree import DecisionTreeClassifier
def interactive_tree(depth=1):
clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
visualize_tree(clf, X, y)
from IPython.html.widgets import interact
return interact(interactive_tree, depth=[1, 5])
def plot_kmeans_interactive(min_clusters=1, max_clusters=6):
from IPython.html.widgets import interact
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
def _kmeans_step(frame=0, n_clusters=4):
rng = np.random.RandomState(2)
labels = np.zeros(X.shape[0])
centers = rng.randn(n_clusters, 2)
nsteps = frame // 3
for i in range(nsteps + 1):
old_centers = centers
if i < nsteps or frame % 3 > 0:
dist = euclidean_distances(X, centers)
labels = dist.argmin(1)
if i < nsteps or frame % 3 > 1:
centers = np.array([X[labels == j].mean(0)
for j in range(n_clusters)])
nans = np.isnan(centers)
centers[nans] = old_centers[nans]
# plot the data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow',
vmin=0, vmax=n_clusters - 1);
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c='black', s=50)
# plot new centers if third frame
if frame % 3 == 2:
for i in range(n_clusters):
plt.annotate('', centers[i], old_centers[i],
arrowprops=dict(arrowstyle='->', linewidth=1))
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c='black', s=50)
plt.xlim(-4, 4)
plt.ylim(-2, 10)
if frame % 3 == 1:
plt.text(3.8, 9.5, "1. Reassign points to nearest centroid",
ha='right', va='top', size=14)
elif frame % 3 == 2:
plt.text(3.8, 9.5, "2. Update centroids to cluster means",
ha='right', va='top', size=14)
return interact(_kmeans_step, frame=[0, 50],
n_clusters=[min_clusters, max_clusters])
def plot_image_components(x, coefficients=None, mean=0, components=None,
imshape=(8, 8), n_components=6, fontsize=12):
if coefficients is None:
coefficients = x
if components is None:
components = np.eye(len(coefficients), len(x))
mean = np.zeros_like(x) + mean
fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
g = plt.GridSpec(2, 5 + n_components, hspace=0.3)
def show(i, j, x, title=None):
ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
ax.imshow(x.reshape(imshape), interpolation='nearest')
if title:
ax.set_title(title, fontsize=fontsize)
show(slice(2), slice(2), x, "True")
approx = mean.copy()
show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
show(1, 2, approx, r'$1 \cdot \mu$')
for i in range(0, n_components):
approx = approx + coefficients[i] * components[i]
show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1))
show(1, i + 3, approx,
r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
transform=plt.gca().transAxes, fontsize=fontsize)
show(slice(2), slice(-2, None), approx, "Approx")
def plot_pca_interactive(data, n_components=6):
from sklearn.decomposition import PCA
from IPython.html.widgets import interact
pca = PCA(n_components=n_components)
Xproj = pca.fit_transform(data)
def show_decomp(i=0):
plot_image_components(data[i], Xproj[i],
pca.mean_, pca.components_)
interact(show_decomp, i=(0, data.shape[0] - 1));
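# --- Hedged usage sketch (an added illustration, not part of the original
# notebook helpers): render one of the static figures when the module is run
# directly; the interactive helpers above require an IPython notebook kernel.
if __name__ == "__main__":
    plot_example_decision_tree()
    plt.show()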
| bsd-3-clause |
Rosuav/shed | probability.py | 1 | 3924 | import math
import random
import statistics
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
def parse_roll_test(test):
# Parse the result of 'roll test' from Minstrel Hall
dist = {}
total = 0
for line in test.split("\n"):
if ":" in line:
val, prob = line.split(":")
dist[val] = int(prob)
total += int(prob)
# Parsing complete. We now have a mapping of result to number of instances,
# and a total result count. So dist[X]/total == probability of X occurring,
# for any X.
return dist, total
def chisq(dist, total):
expected = total / len(dist)
error = 0
for result, count in dist.items():
error += (count - expected) ** 2 / expected
print("χ² =", error)
if 0: chisq(*parse_roll_test("""
1: 10135
2: 9971
3: 9774
4: 9849
5: 10059
6: 9936
7: 9990
8: 10027
9: 9917
10: 10054
11: 10202
12: 10008
13: 10136
14: 10060
15: 10012
16: 9941
17: 10007
18: 9956
19: 10096
20: 9870
"""))
def roll_dice(n):
# Roll an N-sided dice. Mess with this to create unfair distributions.
roll = random.randrange(n) + 1
if roll in {1, n} and not random.randrange(40):
roll = random.randrange(n) + 1
return roll
def test_dice_roller(n, tries):
dist = {}
# Initialize to all zeroes to ensure that we have entries for everything
for i in range(n):
dist[i + 1] = 0
# Roll a bunch of dice and see what comes up
for _ in range(tries):
dist[roll_dice(n)] += 1
chisq(dist, tries)
# for _ in range(15): test_dice_roller(20, 200000)
#(x ** 500_000_000) * ((1-x) ** 500_000_000)
#fac(1_000_000_000) / (fac(500_000_000) * fac(500_000_000))
def choose(n, k):
# Actually calculate n choose k
# n! / (k!(n-k)!)
num = denom = 1
for i in range(1, k + 1):
num *= i + n - k
denom *= i
return num // denom
def approxchoose(n, k):
# Approximate n choose k for large n and k
# Returns values a smidge too high, and has to be special-cased for N choose 0 and N choose N
if k == 0 or k == n: return 1
return (n / (2 * math.pi * k * (n-k))) ** 0.5 * (n ** n) / (k ** k * (n - k) ** (n - k))
def choosehalf(n):
# Approximate 2n choose n
return 2 ** (2*n) / (math.pi * n)**0.5
print(choose(20, 10))
print(approxchoose(20, 10))
print(choosehalf(10))
# print(approxchoose(1_000_000, 500_000))
# print(choosehalf(500_000))
# Binomial distribution for N coin tosses
# What is the standard deviation of [0]*N + [1]*N ?
N = 1_000_000_000
half = 5_000_000_000
def stdev(N):
ss = 0
for n in range(N):
ss += approxchoose(N, n) * (n - N/2) ** 2
return (ss / (2**N-1)) ** 0.5
def pascal(n):
if n == 0: return [1]
p = [0] + pascal(n - 1) + [0]
return [p[i] + p[i + 1] for i in range(len(p) - 1)]
if 0:
# Calculate the error in the approxchoose function by comparing against
# the corresponding row in Pascal's Triangle
N = 20
for n, count in enumerate(pascal(N)):
print(f"%{len(str(N))}d %.5f" % (n, approxchoose(N, n) / count), count)
import sys; sys.exit(0)
# statistics.NormalDist(0.5, stdev(N))
# for N in (4, 10, 20, 100, 1_000_000, 1_000_000_000, 1_000_000_000_000):
for N in (5, 10, 15, 20, 50, 75, 100):
sigmaN = stdev(N) / N
print(N, sigmaN)
x = np.linspace(0, 1.0, N + 2)
if N < 50:
p = [p * n / 2**N * 2 for n, p in enumerate(pascal(N) + [0])]
plt.plot(x, p, label=f"Actual [{N}]")
if 0: # Slow calculation of the same figure stdev gives
samples = []
for n, count in enumerate(pascal(N)): samples.extend(count * [n])
# mu / N, sigma / N
muN, sigmaN = 0.5, statistics.stdev(samples)/N
print(N, sigmaN)
plt.plot(x, stats.norm.pdf(x, 0.5, sigmaN), label=str(N))
# What is the first derivative of the PDF of (1e9 choose 5e8) at 0.5?
# f''(x) = 1/sqrt(2*pi)*e**(-1/2x^2) * (x^2-1)
# 499_000_000 <= x <= 501_000_000 ?? What probability?
# CDF: What is the probability that x < 499e6 ?
# CDF: What is the probability that x < 501e6 ?
# What is the spread around the mean such that CDF(x+spread) - CDF(x-spread) == 0.99?
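# --- Hedged sketch (an added illustration, not original to this script): answer
# the questions above with the normal approximation to Binomial(1e9, 0.5), whose
# standard deviation is sqrt(N/4) ~= 15811 heads; the names below are new.
if 0:
    approx_sigma = (1_000_000_000 / 4) ** 0.5
    # P(499e6 <= X <= 501e6): +/-1e6 is ~63 standard deviations, so essentially 1.
    print(stats.norm.cdf(501_000_000, 500_000_000, approx_sigma) -
          stats.norm.cdf(499_000_000, 500_000_000, approx_sigma))
    # Spread s with CDF(mu+s) - CDF(mu-s) = 0.99: s = z_0.995 * sigma ~= 2.576 * sigma,
    # i.e. roughly 41,000 heads on either side of the mean.
    print(stats.norm.ppf(0.995) * approx_sigma)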
plt.legend()
plt.show()
| mit |
msmbuilder/msmbuilder | msmbuilder/cluster/kcenters.py | 9 | 6846 | # Author: Robert McGibbon <[email protected]>
# Contributors: Brooke Husic <[email protected]>
# Copyright (c) 2016, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
import numpy as np
from sklearn.utils import check_random_state
from sklearn.base import ClusterMixin, TransformerMixin
from .. import libdistance
from . import MultiSequenceClusterMixin
from ..base import BaseEstimator
__all__ = ['KCenters']
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class _KCenters(ClusterMixin, TransformerMixin):
"""K-Centers clustering
Cluster a vector or Trajectory dataset using a simple heuristic to minimize
the maximum distance from any data point to its assigned cluster center.
The runtime of this algorithm is O(kN), where k is the number of
clusters and N is the size of the dataset, making it one of the least
expensive clustering algorithms available.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
metric : {"euclidean", "sqeuclidean", "cityblock", "chebyshev", "canberra",
"braycurtis", "hamming", "jaccard", "cityblock", "rmsd"}
The distance metric to use. metric = "rmsd" requires that sequences
passed to ``fit()`` be ```md.Trajectory```; other distance metrics
require ``np.ndarray``s.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
References
----------
.. [1] Gonzalez, Teofilo F. "Clustering to minimize the maximum
intercluster distance." Theor. Comput. Sci. 38 (1985): 293-306.
.. [2] Beauchamp, Kyle A., et al. "MSMBuilder2: modeling conformational
dynamics on the picosecond to millisecond scale." J. Chem. Theory.
Comput. 7.10 (2011): 3412-3419.
Attributes
----------
cluster_ids_ : array, [n_clusters]
Index of the data point that each cluster label corresponds to.
cluster_centers_ : array, [n_clusters, n_features] or md.Trajectory
Coordinates of cluster centers
labels_ : array, [n_samples,]
The label of each point is an integer in [0, n_clusters).
distances_ : array, [n_samples,]
Distance from each sample to the cluster center it is
assigned to.
inertia_ : float
Sum of distances of samples to their closest cluster center.
"""
def __init__(self, n_clusters=8, metric='euclidean', random_state=None):
self.n_clusters = n_clusters
self.metric = metric
self.random_state = random_state
def fit(self, X, y=None):
if isinstance(X, np.ndarray):
if not (X.dtype == 'float32' or X.dtype == 'float64'):
X = X.astype('float64')
n_samples = len(X)
new_center_index = check_random_state(self.random_state).randint(0, n_samples)
self.labels_ = np.zeros(n_samples, dtype=int)
self.distances_ = np.empty(n_samples, dtype=float)
self.distances_.fill(np.inf)
cluster_ids_ = []
for i in range(self.n_clusters):
d = libdistance.dist(X, X[new_center_index], metric=self.metric)
mask = (d < self.distances_)
self.distances_[mask] = d[mask]
self.labels_[mask] = i
cluster_ids_.append(new_center_index)
new_center_index = np.argmax(self.distances_)
self.cluster_ids_ = cluster_ids_
self.cluster_centers_ = X[cluster_ids_]
self.inertia_ = np.sum(self.distances_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data to predict.
Returns
-------
Y : array, shape [n_samples,]
Index of the closest center each sample belongs to.
"""
if isinstance(X, np.ndarray):
if not (X.dtype == 'float32' or X.dtype == 'float64'):
X = X.astype('float64')
labels, inertia = libdistance.assign_nearest(
X, self.cluster_centers_, metric=self.metric)
return labels
def fit_predict(self, X, y=None):
return self.fit(X, y).labels_
class KCenters(MultiSequenceClusterMixin, _KCenters, BaseEstimator):
_allow_trajectory = True
__doc__ = _KCenters.__doc__[: _KCenters.__doc__.find('Attributes')] + \
'''
Attributes
----------
`cluster_centers_` : array, [n_clusters, n_features]
Coordinates of cluster centers
`labels_` : list of arrays, each of shape [sequence_length, ]
`labels_[i]` is an array of the labels of each point in
sequence `i`. The label of each point is an integer in
[0, n_clusters).
`distances_` : list of arrays, each of shape [sequence_length, ]
        `distances_[i]` is an array of the distances from each point in
        sequence `i` to the cluster center it is assigned to.
'''
def fit(self, sequences, y=None):
"""Fit the kcenters clustering on the data
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries, or ``md.Trajectory``. Each
sequence may have a different length, but they all must have the
same number of features, or the same number of atoms if they are
``md.Trajectory``s.
Returns
-------
self
"""
MultiSequenceClusterMixin.fit(self, sequences)
self.distances_ = self._split(self.distances_)
return self
def summarize(self):
return """KCenters clustering
--------------------
n_clusters : {n_clusters}
metric : {metric}
Inertia : {inertia}
Mean distance : {mean_distance}
Max distance : {max_distance}
""".format(n_clusters=self.n_clusters, metric=self.metric,
inertia=self.inertia_, mean_distance=np.mean(np.concatenate(self.distances_)),
max_distance=np.max(np.concatenate(self.distances_)))
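# --- Hedged usage sketch (an added illustration, not part of msmbuilder): cluster
# three random 2-D "trajectories" and inspect the per-sequence label arrays.
if __name__ == "__main__":
    trajs = [np.random.randn(100, 2) for _ in range(3)]
    kcenters = KCenters(n_clusters=5, metric="euclidean", random_state=0)
    kcenters.fit(trajs)
    print(kcenters.summarize())
    print([labels.shape for labels in kcenters.labels_])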
| lgpl-2.1 |
crichardson17/starburst_atlas | SFH_comparison/data/Geneva_cont_Rot/Geneva_cont_Rot_6/fullgrid/peaks_reader.py | 1 | 5057 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
headerloc = "/Users/helen/Documents/Elon/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 3 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print ("Files names constructed")
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
#To fix when hdens > 10
#many of my grids were run off with hdens up to 12 so we needed to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 6.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
# ---------------------------------------------------
#there are the emission line names properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks_Geneva_cont_6', max_values, delimiter='\t')
| gpl-2.0 |
svebk/memex-weapons | deepmodels/test_model.py | 1 | 5520 | import numpy as np
import matplotlib.pyplot as plt
import json
import cv2
import scipy
import pickle
import os,sys
import copy
# Make sure that caffe is on the python path:
#caffe_root = '../' # this file is expected to be in {caffe_root}/examples
#import sys
#sys.path.insert(0, caffe_root + 'python')
import caffe
class CaffeExtractorConf():
MODEL_FILE = './deepmodels/CNN_20K_deploy.prototxt'
PRETRAINED = './deepmodels/CNN_20K.caffemodel'
MEAN_FILE = './deepmodels/imagenet_mean.npy'
CAT_FILE = './deepmodels/cat_20K.txt'
WEAPONS_CAT_FILE = './deepmodels/weapon_classes.npy'
WEAPONS_MAPPING_FILE = './deepmodels/imagenet_memex_mapping.json'
WITH_FLIP = True
WITH_CROP = False
OUTPUT_LAYERS=['prob']
CAFFE_MODE = "CPU"
class CaffeExtractor():
conf = None
net = None
mean_img = None
def __init__(self, in_conf=CaffeExtractorConf()):
self.conf = in_conf
self.initialize()
def initialize(self):
self.np_cat = self.loadCatList(self.conf.CAT_FILE)
#self.wf=np.where(np.load(self.conf.WEAPONS_CAT_FILE)==1)[0]
#self.mapCatList()
# Caffe net init
if self.conf.CAFFE_MODE == "GPU":
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.mean_img = np.load(self.conf.MEAN_FILE)
self.net = caffe.Net(self.conf.MODEL_FILE, self.conf.PRETRAINED, caffe.TEST)
self.conf.batch_size = self.net.blobs['data'].data.shape[0]
self.conf.IN_DIM = (self.net.blobs['data'].data.shape[2],self.net.blobs['data'].data.shape[3])
def formatInput(self,IMAGE_FILE):
start=(256-224)/2
end=start+224
input_image = cv2.imread(IMAGE_FILE)
if input_image is None:
return None
input_image_t=np.transpose(input_image,(2,0,1))
if self.conf.WITH_FLIP:
input_image_fl = np.fliplr(input_image)
input_image_fl_t=np.transpose(input_image,(2,0,1))
if self.conf.WITH_CROP:
input_data = np.asarray([(input_image_t[:,start:end,start:end]-self.mean_img[:,start:end,start:end]),(input_image_fl_t[:,start:end,start:end]-self.mean_img[:,start:end,start:end])])
else:
input_image_tr=scipy.misc.imresize(input_image_t.transpose(1,2,0),self.conf.IN_DIM).transpose(2,0,1)
input_image_fl_tr=scipy.misc.imresize(input_image_fl_t.transpose(1,2,0),self.conf.IN_DIM).transpose(2,0,1)
input_data = np.asarray([(input_image_tr-self.mean_img[:,start:end,start:end]),(input_image_fl_tr-self.mean_img[:,start:end,start:end])])
else:
if self.conf.WITH_CROP:
input_data = np.asarray([(input_image_t[:,start:end,start:end]-self.mean_img[:,start:end,start:end])])
else:
input_image_t=scipy.misc.imresize(input_image_t.transpose(1,2,0),self.conf.IN_DIM).transpose(2,0,1)
input_data = np.asarray([(input_image_t-self.mean_img[:,start:end,start:end])])
return input_data
def getOutput(self,IMAGE_FILE):
self.input_data = None
self.input_data = self.formatInput(IMAGE_FILE)
if self.input_data is None:
return None
self.net.forward(data=self.input_data)
self.out = {}
for layer in self.conf.OUTPUT_LAYERS:
self.out[layer]=self.net.blobs[layer].data
return self.out
@staticmethod
def loadCatList(CAT_FILE):
cat_lists=[]
with open(CAT_FILE,"rt") as fcat:
for line in fcat:
cat_lists.append(line.strip())
print "We have",str(len(cat_lists)),"classes in total."
return np.asarray(cat_lists)
def mapCatList(self):
with open(self.conf.WEAPONS_MAPPING_FILE,"r") as f:
json_map=json.load(f)
self.map_cat_lists=json_map.keys()
self.all_mapped_cat=[]
self.map_cat_pos={}
for key in self.map_cat_lists:
self.map_cat_pos[key]=[]
for cat in json_map[key]:
self.map_cat_pos[key].extend(list(np.where(self.np_cat==cat)[0]))
self.all_mapped_cat.extend(list(np.where(self.np_cat==cat)[0]))
self.map_cat_pos[key]=np.asarray(self.map_cat_pos[key]).squeeze()
self.nonweapons_pos=[i for i in range(self.np_cat.shape[0]) if i not in self.all_mapped_cat]
def show_res_batch(self):
batch_in = self.input_data.shape[0]
prediction = self.out['prob']
for i in range(batch_in):
print "Input #"+str(i)+" classified as:",self.np_cat[prediction[i].argmax()]
ind=np.argsort(prediction[i])[::-1]
print "Top 10 classes are:",self.np_cat[ind[0:10]]
if __name__=="__main__":
if len(sys.argv)<2:
print "Usage: python test_model.py model_folder"
dir_model=sys.argv[1].strip('/')
test_imgs_dir='./result_test_images'
cec = CaffeExtractorConf()
cec.MODEL_FILE = dir_model+"/"+dir_model+'_deploy.prototxt'
cec.PRETRAINED = dir_model+"/"+dir_model+'.caffemodel'
cec.CAT_FILE = dir_model+"/cat_list.txt"
cec.MEAN_FILE = 'imagenet_mean.npy'
cec.WITH_FLIP = False
cec.WITH_CROP = False
cec.OUTPUT_LAYERS=['prob']
cec.CAFFE_MODE = "CPU"
ce = CaffeExtractor(cec)
out_dict={}
for img in os.listdir(test_imgs_dir):
#print img
if img.endswith('.jpg'):
IMAGE_FILE = os.path.join(test_imgs_dir,img)
out = ce.getOutput(IMAGE_FILE)
out_dict[img]=copy.deepcopy(out)
print img,out
ce.show_res_batch()
pickle.dump(out_dict,open(os.path.join(dir_model,"result_test_images.pickle"),"wb"))
np.save(os.path.join(dir_model,"result_test_images.npy"),out_dict)
| bsd-2-clause |
bhargav/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval (the band
# between the alpha=0.05 and alpha=0.95 quantile predictions)
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
snario/geopandas | tests/test_geodataframe.py | 7 | 19172 | from __future__ import absolute_import
import json
import os
import tempfile
import shutil
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from shapely.geometry import Point, Polygon
import fiona
from geopandas import GeoDataFrame, read_file, GeoSeries
from .util import unittest, download_nybb, assert_geoseries_equal, connect, \
create_db, validate_boro_df, PANDAS_NEW_SQL_API
class TestDataFrame(unittest.TestCase):
def setUp(self):
N = 10
nybb_filename = download_nybb()
self.df = read_file('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename)
with fiona.open('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename) as f:
self.schema = f.schema
self.tempdir = tempfile.mkdtemp()
self.boros = self.df['BoroName']
self.crs = {'init': 'epsg:4326'}
self.df2 = GeoDataFrame([
{'geometry': Point(x, y), 'value1': x + y, 'value2': x * y}
for x, y in zip(range(N), range(N))], crs=self.crs)
self.df3 = read_file('examples/null_geom.geojson')
self.line_paths = self.df3['Name']
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_df_init(self):
self.assertTrue(type(self.df2) is GeoDataFrame)
self.assertTrue(self.df2.crs == self.crs)
def test_different_geo_colname(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
locs = GeoSeries(data['location'], crs=self.crs)
assert_geoseries_equal(df.geometry, locs)
self.assert_('geometry' not in df)
self.assertEqual(df.geometry.name, 'location')
# internal implementation detail
self.assertEqual(df._geometry_column_name, 'location')
geom2 = [Point(x, y) for x, y in zip(range(5, 10), range(5))]
df2 = df.set_geometry(geom2, crs='dummy_crs')
self.assert_('geometry' in df2)
self.assert_('location' in df2)
self.assertEqual(df2.crs, 'dummy_crs')
self.assertEqual(df2.geometry.crs, 'dummy_crs')
# reset so it outputs okay
df2.crs = df.crs
assert_geoseries_equal(df2.geometry, GeoSeries(geom2, crs=df2.crs))
# for right now, non-geometry comes back as series
assert_geoseries_equal(df2['location'], df['location'],
check_series_type=False, check_dtype=False)
def test_geo_getitem(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
self.assert_(isinstance(df.geometry, GeoSeries))
df['geometry'] = df["A"]
self.assert_(isinstance(df.geometry, GeoSeries))
self.assertEqual(df.geometry[0], data['location'][0])
# good if this changed in the future
self.assert_(not isinstance(df['geometry'], GeoSeries))
self.assert_(isinstance(df['location'], GeoSeries))
data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5), range(5))]
df = GeoDataFrame(data, crs=self.crs)
self.assert_(isinstance(df.geometry, GeoSeries))
self.assert_(isinstance(df['geometry'], GeoSeries))
# good if this changed in the future
self.assert_(not isinstance(df['location'], GeoSeries))
def test_geometry_property(self):
assert_geoseries_equal(self.df.geometry, self.df['geometry'],
check_dtype=True, check_index_type=True)
df = self.df.copy()
new_geom = [Point(x, y) for x, y in zip(range(len(self.df)),
range(len(self.df)))]
df.geometry = new_geom
new_geom = GeoSeries(new_geom, index=df.index, crs=df.crs)
assert_geoseries_equal(df.geometry, new_geom)
assert_geoseries_equal(df['geometry'], new_geom)
# new crs
gs = GeoSeries(new_geom, crs="epsg:26018")
df.geometry = gs
self.assertEqual(df.crs, "epsg:26018")
def test_geometry_property_errors(self):
with self.assertRaises(AttributeError):
df = self.df.copy()
del df['geometry']
df.geometry
# list-like error
with self.assertRaises(ValueError):
df = self.df2.copy()
df.geometry = 'value1'
# list-like error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = 'apple'
# non-geometry error
with self.assertRaises(TypeError):
df = self.df.copy()
df.geometry = list(range(df.shape[0]))
with self.assertRaises(KeyError):
df = self.df.copy()
del df['geometry']
df['geometry']
# ndim error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = df
def test_set_geometry(self):
geom = GeoSeries([Point(x, y) for x, y in zip(range(5), range(5))])
original_geom = self.df.geometry
df2 = self.df.set_geometry(geom)
self.assert_(self.df is not df2)
assert_geoseries_equal(df2.geometry, geom)
assert_geoseries_equal(self.df.geometry, original_geom)
assert_geoseries_equal(self.df['geometry'], self.df.geometry)
# unknown column
with self.assertRaises(ValueError):
self.df.set_geometry('nonexistent-column')
# ndim error
with self.assertRaises(ValueError):
self.df.set_geometry(self.df)
# new crs - setting should default to GeoSeries' crs
gs = GeoSeries(geom, crs="epsg:26018")
new_df = self.df.set_geometry(gs)
self.assertEqual(new_df.crs, "epsg:26018")
# explicit crs overrides self and dataframe
new_df = self.df.set_geometry(gs, crs="epsg:27159")
self.assertEqual(new_df.crs, "epsg:27159")
self.assertEqual(new_df.geometry.crs, "epsg:27159")
# Series should use dataframe's
new_df = self.df.set_geometry(geom.values)
self.assertEqual(new_df.crs, self.df.crs)
self.assertEqual(new_df.geometry.crs, self.df.crs)
def test_set_geometry_col(self):
g = self.df.geometry
g_simplified = g.simplify(100)
self.df['simplified_geometry'] = g_simplified
df2 = self.df.set_geometry('simplified_geometry')
# Drop is false by default
self.assert_('simplified_geometry' in df2)
assert_geoseries_equal(df2.geometry, g_simplified)
# If True, drops column and renames to geometry
df3 = self.df.set_geometry('simplified_geometry', drop=True)
self.assert_('simplified_geometry' not in df3)
assert_geoseries_equal(df3.geometry, g_simplified)
def test_set_geometry_inplace(self):
geom = [Point(x, y) for x, y in zip(range(5), range(5))]
ret = self.df.set_geometry(geom, inplace=True)
self.assert_(ret is None)
geom = GeoSeries(geom, index=self.df.index, crs=self.df.crs)
assert_geoseries_equal(self.df.geometry, geom)
def test_set_geometry_series(self):
# Test when setting geometry with a Series that
# alignment will occur
#
# Reverse the index order
# Set the Series to be Point(i,i) where i is the index
self.df.index = range(len(self.df)-1, -1, -1)
d = {}
for i in range(len(self.df)):
d[i] = Point(i, i)
g = GeoSeries(d)
# At this point, the DataFrame index is [4,3,2,1,0] and the
# GeoSeries index is [0,1,2,3,4]. Make sure set_geometry aligns
# them to match indexes
df = self.df.set_geometry(g)
for i, r in df.iterrows():
self.assertAlmostEqual(i, r['geometry'].x)
self.assertAlmostEqual(i, r['geometry'].y)
def test_to_json(self):
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_geom_col(self):
df = self.df.copy()
df['geom'] = df['geometry']
df['geometry'] = np.arange(len(df))
df.set_geometry('geom', inplace=True)
text = df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_na(self):
# Set a value as nan and make sure it's written
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(len(data['features']) == 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(props['Shape_Area'] is None)
def test_to_json_bad_na(self):
# Check that a bad na argument raises error
with self.assertRaises(ValueError):
text = self.df.to_json(na='garbage')
def test_to_json_dropna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='drop')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
if props['BoroName'] == 'Queens':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Area' not in props)
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Leng' not in props)
self.assertTrue('Shape_Area' in props)
else:
self.assertEqual(len(props), 4)
def test_to_json_keepna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='keep')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(np.isnan(props['Shape_Area']))
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertTrue(np.isnan(props['Shape_Leng']))
self.assertTrue('Shape_Area' in props)
def test_copy(self):
df2 = self.df.copy()
self.assertTrue(type(df2) is GeoDataFrame)
self.assertEqual(self.df.crs, df2.crs)
def test_to_file(self):
""" Test to_file and from_file """
tempfilename = os.path.join(self.tempdir, 'boros.shp')
self.df.to_file(tempfilename)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df)
self.assertTrue(len(df) == 5)
self.assertTrue(np.alltrue(df['BoroName'].values == self.boros))
# Write layer with null geometry out to file
tempfilename = os.path.join(self.tempdir, 'null_geom.shp')
self.df3.to_file(tempfilename)
# Read layer back in
df3 = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df3)
self.assertTrue(len(df3) == 2)
self.assertTrue(np.alltrue(df3['Name'].values == self.line_paths))
def test_to_file_types(self):
""" Test various integer type columns (GH#93) """
tempfilename = os.path.join(self.tempdir, 'int.shp')
int_types = [np.int, np.int8, np.int16, np.int32, np.int64, np.intp,
np.uint8, np.uint16, np.uint32, np.uint64, np.long]
geometry = self.df2.geometry
data = dict((str(i), np.arange(len(geometry), dtype=dtype))
for i, dtype in enumerate(int_types))
df = GeoDataFrame(data, geometry=geometry)
df.to_file(tempfilename)
def test_mixed_types_to_file(self):
""" Test that mixed geometry types raise error when writing to file """
tempfilename = os.path.join(self.tempdir, 'test.shp')
s = GeoDataFrame({'geometry': [Point(0, 0),
Polygon([(0, 0), (1, 0), (1, 1)])]})
with self.assertRaises(ValueError):
s.to_file(tempfilename)
def test_to_file_schema(self):
"""
Ensure that the file is written according to the schema
if it is specified
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
tempfilename = os.path.join(self.tempdir, 'test.shp')
properties = OrderedDict([
('Shape_Leng', 'float:19.11'),
('BoroName', 'str:40'),
('BoroCode', 'int:10'),
('Shape_Area', 'float:19.11'),
])
schema = {'geometry': 'Polygon', 'properties': properties}
# Take the first 2 features to speed things up a bit
self.df.iloc[:2].to_file(tempfilename, schema=schema)
with fiona.open(tempfilename) as f:
result_schema = f.schema
self.assertEqual(result_schema, schema)
def test_bool_index(self):
# Find boros with 'B' in their name
df = self.df[self.df['BoroName'].str.contains('B')]
self.assertTrue(len(df) == 2)
boros = df['BoroName'].values
self.assertTrue('Brooklyn' in boros)
self.assertTrue('Bronx' in boros)
self.assertTrue(type(df) is GeoDataFrame)
def test_transform(self):
df2 = self.df2.copy()
df2.crs = {'init': 'epsg:26918', 'no_defs': True}
lonlat = df2.to_crs(epsg=4326)
utm = lonlat.to_crs(epsg=26918)
self.assertTrue(all(df2['geometry'].geom_almost_equals(utm['geometry'], decimal=2)))
def test_from_features(self):
nybb_filename = download_nybb()
with fiona.open('/nybb_14a_av/nybb.shp',
vfs='zip://' + nybb_filename) as f:
features = list(f)
crs = f.crs
df = GeoDataFrame.from_features(features, crs=crs)
df.rename(columns=lambda x: x.lower(), inplace=True)
validate_boro_df(self, df)
self.assert_(df.crs == crs)
def test_from_features_unaligned_properties(self):
p1 = Point(1, 1)
f1 = {'type': 'Feature',
'properties': {'a': 0},
'geometry': p1.__geo_interface__}
p2 = Point(2, 2)
f2 = {'type': 'Feature',
'properties': {'b': 1},
'geometry': p2.__geo_interface__}
p3 = Point(3, 3)
f3 = {'type': 'Feature',
'properties': {'a': 2},
'geometry': p3.__geo_interface__}
df = GeoDataFrame.from_features([f1, f2, f3])
result = df[['a', 'b']]
expected = pd.DataFrame.from_dict([{'a': 0, 'b': np.nan},
{'a': np.nan, 'b': 1},
{'a': 2, 'b': np.nan}])
assert_frame_equal(expected, result)
def test_from_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = GeoDataFrame.from_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_from_postgis_custom_geom_col(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = GeoDataFrame.from_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_dataframe_to_geodataframe(self):
df = pd.DataFrame({"A": range(len(self.df)), "location":
list(self.df.geometry)}, index=self.df.index)
gf = df.set_geometry('location', crs=self.df.crs)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf, GeoDataFrame)
assert_geoseries_equal(gf.geometry, self.df.geometry)
self.assertEqual(gf.geometry.name, 'location')
self.assert_('geometry' not in gf)
gf2 = df.set_geometry('location', crs=self.df.crs, drop=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf2, GeoDataFrame)
self.assertEqual(gf2.geometry.name, 'geometry')
self.assert_('geometry' in gf2)
self.assert_('location' not in gf2)
self.assert_('location' in df)
# should be a copy
df.ix[0, "A"] = 100
self.assertEqual(gf.ix[0, "A"], 0)
self.assertEqual(gf2.ix[0, "A"], 0)
with self.assertRaises(ValueError):
df.set_geometry('location', inplace=True)
def test_geodataframe_geointerface(self):
self.assertEqual(self.df.__geo_interface__['type'], 'FeatureCollection')
self.assertEqual(len(self.df.__geo_interface__['features']),
self.df.shape[0])
def test_geodataframe_geojson_no_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=False)
self.assertFalse('bbox' in geo.keys())
for feature in geo['features']:
self.assertFalse('bbox' in feature.keys())
def test_geodataframe_geojson_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=True)
self.assertTrue('bbox' in geo.keys())
self.assertEqual(len(geo['bbox']), 4)
self.assertTrue(isinstance(geo['bbox'], tuple))
for feature in geo['features']:
self.assertTrue('bbox' in feature.keys())
def test_pickle(self):
filename = os.path.join(self.tempdir, 'df.pkl')
self.df.to_pickle(filename)
unpickled = pd.read_pickle(filename)
assert_frame_equal(self.df, unpickled)
self.assertEqual(self.df.crs, unpickled.crs)
| bsd-3-clause |
cpcloud/bokeh | bokeh/properties.py | 1 | 39861 | """ A set of descriptors that document intended types for attributes on
classes and implement convenience behaviors like default values, etc.
"""
from __future__ import print_function
import re
import datetime
import dateutil.parser
from importlib import import_module
from copy import copy
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums
def nice_join(seq, sep=", "):
seq = [str(x) for x in seq]
if len(seq) <= 1:
return sep.join(seq)
else:
return "%s or %s" % (sep.join(seq[:-1]), seq[-1])
class Property(object):
def __init__(self, default=None):
""" This is how the descriptor is created in the class declaration """
self.validate(default)
self.default = default
# This gets set by the class decorator at class creation time
self.name = "unnamed"
def __str__(self):
return self.__class__.__name__
@property
def _name(self):
return "_" + self.name
@classmethod
def autocreate(cls, name=None):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def matches(self, new, old):
        # XXX: originally this code warned when values could not be compared, but that
        # doesn't make sense, because most comparisons involving numpy arrays raise a
        # ValueError, so the warning would fire all the time.
try:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
return False
def transform(self, value):
return value
def validate(self, value):
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
def __get__(self, obj, type=None):
return getattr(obj, self._name, self.default)
def __set__(self, obj, value):
self.validate(value)
value = self.transform(value)
old = self.__get__(obj)
obj._changed_vars.add(self.name)
if self._name in obj.__dict__ and self.matches(value, old):
return
setattr(obj, self._name, value)
obj._dirty = True
if hasattr(obj, '_trigger'):
if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
obj._callback_queue.append((self.name, old, value))
else:
obj._trigger(self.name, old, value)
def __delete__(self, obj):
if hasattr(obj, self._name):
delattr(obj, self._name)
@property
def has_ref(self):
return False
class Include(Property):
def __init__(self, delegate, prefix=None):
self._delegate = delegate
self._prefix = prefix
super(Include, self).__init__()
class DataSpec(Property):
""" Because the BokehJS glyphs support a fixed value or a named
field for most data fields, we capture that in this descriptor.
Fields can have a fixed value, or be a name that is looked up
on the datasource (usually as a column or record array field).
A default value can also be provided for when a particular row
in the datasource has a missing value.
Numerical data can also have units of screen or data space.
We mirror the JS convention in this Python descriptor. For details,
see renderers/properties.coffee in BokehJS, and specifically the
select() function.
There are multiple ways to set a DataSpec, illustrated below with comments
and example code.
Setting DataSpecs
Simple example::
class Foo(HasProps):
x = DataSpec("x", units="data")
f = Foo()
f.x = "fieldname" # Use the datasource field named "fieldname"
f.x = 12 # A fixed value of 12
f.x = ("foo", 16) # a field name, and a default value
Can provide a dict with the fields explicitly named::
f.width = {"name": "foo", "default": 16}
f.size = {"name": "foo", "units": "screen", "default": 16}
Reading DataSpecs
In the cases when the dataspec is set to just a field name or a
fixed value, then those are returned. If the user has overridden
the default value in the DataSpec with a new default value, or
if no values have been set, then the value of to_dict() is returned.
In all cases, to determine the full dict that will be used to
represent this dataspec, use the to_dict() method.
Implementation
The DataSpec instance is stored in the class dict, and acts as a
descriptor. Thus, it is shared between all instances of the class.
Instance-specific data is stored in the instance dict, in a private
variable named _[attrname]. This stores the actual value that the
user last set (and does not exist if the user has not yet set the
value).
"""
def __init__(self, field=None, units="data", default=None, min_value=None):
"""
Parameters
==========
**field** is the string name of a data column to look up.
**units** is either "data" or "screen"
**default** is the default value to use if a datapoint is
          missing the field specified in **field**
"""
# Don't use .name because the HasProps metaclass uses that to
# store the attribute name on this descriptor.
self.field = field
self.units = units
self.default = default
self.min_value = min_value
@classmethod
def autocreate(cls, name=None):
# In this case, use the name the user assigned this DataSpec to
# as the default field name.
d = cls(field=name)
return d
def __get__(self, obj, cls=None):
""" Try to implement a "natural" interface: if the user just set
simple values or field names, the getter just returns those.
However, if the user has also overridden the "units" or "default"
settings, then a dictionary is returned.
"""
if hasattr(obj, self._name):
setval = getattr(obj, self._name)
if isinstance(setval, string_types) and self.default is None:
# A string representing the field
return setval
elif not isinstance(setval, dict):
# Typically a number presenting the fixed value
return setval
else:
return self.to_dict(obj)
else:
# If the user hasn't set anything, just return the field name
# if there are not defaults, or a dict with the field name
# and the default value.
if self.default is not None:
return {"field": self.field, "default": self.default}
else:
return self.field
def __set__(self, obj, arg):
if isinstance(arg, tuple):
# Note: tuples of length 2 are assumed to be (field, default)
# other (longer) tuples might be color, e.g.
if len(arg) == 2:
field, default = arg
if not isinstance(field, string_types):
raise RuntimeError("String is required for field name when assigning tuple to a DataSpec")
arg = {"field": field, "default": default}
super(DataSpec, self).__set__(obj, arg)
def to_dict(self, obj):
# Build the complete dict
setval = getattr(obj, self._name, None)
if isinstance(setval, string_types):
d = {"field": setval, "units": self.units}
if self.default is not None:
d["default"] = self.default
elif isinstance(setval, dict):
d = {"units": self.units, "default": self.default}
d.update(setval)
if d["default"] is None:
del d["default"]
if "value" in d and "default" in d:
del d["default"]
elif setval is not None:
# a fixed value of some sort; no need to store the default value
d = {"value": setval, "units": self.units}
else:
# If the user never set a value
d = {"field": self.field, "units": self.units}
if self.default is not None:
d["default"] = self.default
if "value" in d and self.min_value is not None:
if d["value"] < self.min_value:
raise ValueError("value must be greater than %s" % str(self.min_value))
return d
def __repr__(self):
return "DataSpec(field=%r, units=%r, default=%r)" % (
self.field, self.units, self.default)
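# Illustrative sketch: how a DataSpec-backed attribute round-trips when it is
# read back. Foo is a hypothetical HasProps subclass used only for this example.
#
#     class Foo(HasProps):
#         x = DataSpec("x", units="data")
#
#     f = Foo()
#     f.x = "colname"        # field name; reads back as the plain string "colname"
#     f.x = 12               # fixed value; reads back as 12
#     f.x = ("colname", 16)  # (field, default) tuple; reads back as the full dict
#                            # {"field": "colname", "default": 16, "units": "data"}
#     Foo.__dict__["x"].to_dict(f)   # always builds the complete serialized dict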
class ColorSpec(DataSpec):
""" Subclass of DataSpec for specifying colors.
Although this serves the same role as a DataSpec, its usage is somewhat
different because:
* Specifying a fixed value is much more common
    * Strings can be either field identifiers or refer to one of the SVG
Named Colors (or be a hex value starting with "#")
* There are no units
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value. Otherwise,
it is treated as a field name.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
If a 2-tuple is provided, then it is treated as (value/fieldname, default).
This is the same as the behavior in the base class DataSpec.
Unlike DataSpec, ColorSpecs do not have a "units" property.
When reading out a ColorSpec, it returns a tuple, hex value, field name,
or a dict of (field, default).
There are two common use cases for ColorSpec: setting a constant value,
and indicating a field name to look for on the datasource:
>>> class Bar(HasProps):
... col = ColorSpec("green")
... col2 = ColorSpec("colorfield")
... col3 = ColorSpec("colorfield", default="aqua")
>>> b = Bar()
>>> b.col = "red" # sets a fixed value of red
>>> b.col
"red"
>>> b.col = "myfield" # Use the datasource field named "myfield"
>>> b.col
"myfield"
>>> b.col = {"name": "mycolor", "default": "#FF126D"}
For more examples, see tests/test_glyphs.py
"""
NAMEDCOLORS = set(enums.NamedColor._values)
def __init__(self, field_or_value=None, field=None, default=None, value=None):
""" ColorSpec(field_or_value=None, field=None, default=None, value=None)
"""
# The fancy footwork below is so we auto-interpret the first positional
# parameter as either a field or a fixed value. If either "field" or
# "value" are then supplied as keyword arguments, then those will
# override the inferred value from the positional argument.
self.field = field
self.default = default
self.value = value
if field_or_value is not None:
if self.isconst(field_or_value):
self.value = field_or_value
else:
self.field = field_or_value
# We need to distinguish if the user ever explicitly sets the attribute; if
# they explicitly set it to None, we should pass on None in the dict. Otherwise,
# look up a default or value
self._isset = False
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in cls.NAMEDCOLORS)
def _formattuple(self, colortuple):
if isinstance(colortuple, tuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
else:
return colortuple
def __get__(self, obj, cls=None):
# One key difference in ColorSpec.__get__ from the base class is
# that we do not call self.to_dict() in any circumstance, because
# this could lead to formatting color tuples as "rgb(R,G,B)" instead
# of keeping them as tuples.
if hasattr(obj, self._name):
setval = getattr(obj, self._name)
if self.isconst(setval) or isinstance(setval, tuple):
# Fixed color value
return setval
elif isinstance(setval, string_types):
if self.default is None:
# Field name
return setval
else:
return {"field": setval, "default": self.default}
elif setval is None:
                return None
else:
# setval should be a dict at this point
assert(isinstance(setval, dict))
return setval
else:
if self.value is not None:
return self.value
elif self.default is not None:
return {"field": self.field, "default": self.default}
else:
return self.field
def __set__(self, obj, arg):
self._isset = True
if isinstance(arg, tuple):
if len(arg) == 2:
if not isinstance(arg[0], string_types):
raise RuntimeError("String is required for field name when assigning 2-tuple to ColorSpec")
arg = {"field": arg[0], "default": arg[1]}
elif len(arg) in (3, 4):
# RGB or RGBa
pass
else:
raise RuntimeError("Invalid tuple being assigned to ColorSpec; must be length 2, 3, or 4.")
elif hasattr(arg, "toCSS"):
arg = arg.toCSS()
super(ColorSpec, self).__set__(obj, arg)
def to_dict(self, obj):
setval = getattr(obj, self._name, None)
if setval is not None:
if self.isconst(setval):
# Hexadecimal or named color
return {"value": setval}
elif isinstance(setval, tuple):
# RGB or RGBa
# TODO: Should we validate that alpha is between 0..1?
return {"value": self._formattuple(setval)}
elif isinstance(setval, string_types):
d = {"field": setval}
if self.default is not None:
d["default"] = self._formattuple(self.default)
return d
elif isinstance(setval, dict):
# this is considerably simpler than the DataSpec case because
# there are no units involved, and we've handled all of the
# value cases above.
d = setval.copy()
if isinstance(d.get("default", None), tuple):
d["default"] = self._formattuple(d["default"])
return d
else:
if self._isset:
return {"value": None}
# If the user never set a value
if self.value is not None:
return {"value": self.value}
else:
d = {"field": self.field}
if self.default is not None:
d["default"] = self._formattuple(self.default)
return d
def __repr__(self):
return "ColorSpec(field=%r, default=%r)" % (self.field, self.default)
class MetaHasProps(type):
def __new__(cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# First pre-process to handle all the Includes
includes = {}
removes = set()
for name, prop in class_dict.items():
if not isinstance(prop, Include):
continue
delegate = prop._delegate
if not (isinstance(delegate,type) and issubclass(delegate,HasProps)):
continue
if prop._prefix is None:
prefix = name + "_"
else:
prefix = prop._prefix + "_"
for subpropname in delegate.class_properties(withbases=False):
fullpropname = prefix + subpropname
subprop = lookup_descriptor(delegate, subpropname)
if isinstance(subprop, Property):
# If it's an actual instance, then we need to make a copy
# so two properties don't write to the same hidden variable
# inside the instance.
subprop = copy(subprop)
includes[fullpropname] = subprop
# Remove the name of the Include attribute itself
removes.add(name)
# Update the class dictionary, taking care not to overwrite values
# from the delegates that the subclass may have explicitly defined
for key, val in includes.items():
if key not in class_dict:
class_dict[key] = val
for tmp in removes:
del class_dict[tmp]
dataspecs = {}
for name, prop in class_dict.items():
if isinstance(prop, Property):
prop.name = name
if prop.has_ref:
names_with_refs.add(name)
elif isinstance(prop, ContainerProperty):
container_names.add(name)
names.add(name)
if isinstance(prop, DataSpec):
dataspecs[name] = prop
elif isinstance(prop, type) and issubclass(prop, Property):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
newprop = prop.autocreate(name=name)
class_dict[name] = newprop
newprop.name = name
names.add(name)
# Process dataspecs
if issubclass(prop, DataSpec):
dataspecs[name] = newprop
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if dataspecs:
class_dict["_dataspecs"] = dataspecs
return type.__new__(cls, class_name, bases, class_dict)
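# Illustrative sketch of how the metaclass expands an Include; LineStyle and
# Glyph are hypothetical classes used only to show the prefixing behavior.
#
#     class LineStyle(HasProps):
#         color = String("black")
#         width = Float(1.0)
#
#     class Glyph(HasProps):
#         line = Include(LineStyle)   # no explicit prefix, so "line_" is used
#
#     # Glyph gains copied "line_color" and "line_width" properties, and the
#     # "line" Include attribute itself is removed from the class dict.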
def accumulate_from_subclasses(cls, propname):
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps):
s.update(getattr(c, propname))
return s
def lookup_descriptor(cls, propname):
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and propname in c.__dict__:
return c.__dict__[propname]
raise KeyError("Property '%s' not found on class '%s'" % (propname, cls))
@add_metaclass(MetaHasProps)
class HasProps(object):
def __init__(self, **kwargs):
""" Set up a default initializer handler which assigns all kwargs
that have the same names as Properties on the class
"""
# Initialize the mutated property handling
self._changed_vars = set()
props = self.properties()
for key, value in kwargs.items():
if key in props:
setattr(self, key, value)
else:
raise AttributeError("unexpected attribute '%s' to %s, possible attributes are %s" %
(key, self.__class__.__name__, nice_join(props)))
super(HasProps, self).__init__()
def to_dict(self):
return dict((prop, getattr(self, prop)) for prop in self.properties())
def clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self.to_dict())
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
if not hasattr(cls, "__cached_allprops_with_refs"):
s = accumulate_from_subclasses(cls, "__properties_with_refs__")
cls.__cached_allprops_with_refs = s
return cls.__cached_allprops_with_refs
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers
"""
if not hasattr(cls, "__cached_allprops_containers"):
s = accumulate_from_subclasses(cls, "__container_props__")
cls.__cached_allprops_containers = s
return cls.__cached_allprops_containers
@classmethod
def properties(cls):
""" Returns a set of the names of this object's properties. We
traverse the class hierarchy and pull together the full
list of properties.
"""
if not hasattr(cls, "__cached_allprops"):
s = cls.class_properties()
cls.__cached_allprops = s
return cls.__cached_allprops
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
if not hasattr(cls, "__cached_dataspecs"):
dataspecs = set()
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs.keys())
cls.__cached_dataspecs = dataspecs
return cls.__cached_dataspecs
@classmethod
def dataspecs_with_refs(cls):
dataspecs = {}
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs)
return dataspecs
def changed_vars(self):
""" Returns which variables changed since the creation of the object,
or the last called to reset_changed_vars().
"""
return set.union(self._changed_vars, self.properties_with_refs(),
self.properties_containers())
def reset_changed_vars(self):
self._changed_vars = set()
def properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
def changed_properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.changed_vars() ])
@classmethod
def class_properties(cls, withbases=True):
if withbases:
return accumulate_from_subclasses(cls, "__properties__")
else:
return set(cls.__properties__)
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
class Bool(PrimitiveProperty):
_underlying_type = (bool,)
class Int(PrimitiveProperty):
_underlying_type = integer_types
class Float(PrimitiveProperty):
_underlying_type = (float,) + integer_types
class Complex(PrimitiveProperty):
_underlying_type = (complex, float) + integer_types
class String(PrimitiveProperty):
_underlying_type = string_types
class Regex(String):
def __init__(self, regex, default=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class ParameterizedProperty(Property):
"""Property that has type parameters, e.g. `List(String)`. """
def _validate_type_param(self, type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a property as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
# Base class for container-like things; this helps the auto-serialization
# and attribute change detection code
pass
class List(ContainerProperty):
""" If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
People will also frequently pass in some other kind of property or a
class (to indicate a list of instances). In those cases, we want to
just create an empty list
"""
def __init__(self, item_type, default=None):
self.item_type = self._validate_type_param(item_type)
super(List, self).__init__(default=default)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(List, self).validate(value)
if value is not None:
if not (isinstance(value, list) and \
all(self.item_type.is_valid(item) for item in value)):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def __get__(self, obj, type=None):
if hasattr(obj, self._name):
return getattr(obj, self._name)
if self.default is None:
val = []
elif isinstance(self.default, list):
val = copy(self.default)
else:
val = self.default
setattr(obj, self._name, val)
return val
class Dict(ContainerProperty):
""" If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def __get__(self, obj, type=None):
if not hasattr(obj, self._name) and isinstance(self.default, dict):
setattr(obj, self._name, copy(self.default))
return getattr(obj, self._name)
else:
return getattr(obj, self._name, self.default)
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
class Tuple(ContainerProperty):
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default", None))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
class Array(ContainerProperty):
""" Whatever object is passed in as a default value, np.asarray() is
called on it to create a copy for the default value for each use of
this property.
"""
def __init__(self, item_type, default=None):
self.item_type = self._validate_type_param(item_type)
super(Array, self).__init__(default=default)
@property
def type_params(self):
return [self.item_type]
def __get__(self, obj, type=None):
if not hasattr(obj, self._name) and self.default is not None:
setattr(obj, self._name, np.asarray(self.default))
return getattr(obj, self._name)
else:
return getattr(obj, self._name, self.default)
class Instance(Property):
def __init__(self, instance_type, default=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
@property
def has_ref(self):
return True
def __get__(self, obj, type=None):
# If the constructor for Instance() supplied a class name, we should
# instantiate that class here, instead of returning the class as the
# default object
if not hasattr(obj, self._name):
if type and self.default and isinstance(self.default, type):
setattr(obj, self._name, self.default())
return getattr(obj, self._name, None)
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
class This(Property):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(Property): pass
class Function(Property): pass
class Event(Property): pass
class Range(ParameterizedProperty):
def __init__(self, range_type, start, end, default=None):
self.range_type = self._validate_type_param(range_type)
self.range_type.validate(start)
self.range_type.validate(end)
self.start = start
self.end = end
super(Range, self).__init__(default=default)
@property
def type_params(self):
return [self.range_type]
def validate(self, value):
super(Range, self).validate(value)
if not (value is None or self.range_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.range_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.range_type, self.start, self.end)
class Byte(Range):
def __init__(self, default=0):
super(Byte, self).__init__(Int, 0, 255, default=default)
class Either(ParameterizedProperty):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
default = kwargs.get("default", self._type_params[0].default)
super(Either, self).__init__(default=default)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
class Enum(Property):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if not (not values and isinstance(enum, enums.Enumeration)):
enum = enums.enumeration(enum, *values)
self.allowed_values = enum._values
default = kwargs.get("default", enum._default)
super(Enum, self).__init__(default=default)
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self.allowed_values):
raise ValueError("invalid value %r, allowed values are %s" % (value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default)
def __str__(self):
return self.__class__.__name__
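# Illustrative sketch of the value forms a Color property accepts; Style is a
# hypothetical HasProps subclass.
#
#     class Style(HasProps):
#         fill = Color("black")
#
#     s = Style()
#     s.fill = "red"               # one of the SVG named colors
#     s.fill = "#1f77b4"           # hex string matching ^#[0-9a-fA-F]{6}$
#     s.fill = (10, 20, 30)        # RGB bytes, each in 0..255
#     s.fill = (10, 20, 30, 0.5)   # RGBa, alpha as a Percent in [0, 1]
#     s.fill = "not-a-color"       # raises ValueError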
class Align(Property):
pass
class DashPattern(Either):
"""
This is a property that expresses line dashes. It can be specified in
a variety of forms:
* "solid", "dashed", "dotted", "dotdash", "dashdot"
* A tuple or list of integers in the HTML5 Canvas dash specification
style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
If dash is turned off, then the dash pattern is the empty list [].
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[]):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), List(Int)
super(DashPattern, self).__init__(*types, default=default)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
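# Illustrative sketch of what DashPattern.transform stores for each accepted
# input form; Line is a hypothetical HasProps subclass.
#
#     class Line(HasProps):
#         dash = DashPattern()
#
#     ln = Line()
#     ln.dash = "dashed"   # named pattern -> stored as [6]
#     ln.dash = "dotted"   # named pattern -> stored as [2, 4]
#     ln.dash = "2 4 6"    # digit string  -> stored as [2, 4, 6]
#     ln.dash = [8, 3]     # explicit list of ints, kept as-is
#     ln.dash = "solid"    # dashing off   -> stored as []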
class Size(Float):
""" Equivalent to an unsigned int """
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percent is useful for alphas and coverage and extents; more
semantically meaningful than Float(0..1)
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
pass
class Date(Property):
def __init__(self, default=datetime.date.today()):
super(Date, self).__init__(default=default)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
class Datetime(Property):
def __init__(self, default=datetime.date.today()):
super(Datetime, self).__init__(default=default)
def validate(self, value):
super(Datetime, self).validate(value)
if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):
return
try:
import pandas
if isinstance(value, (pandas.Timestamp)):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
def __init__(self, default={}):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default)
def __str__(self):
return self.__class__.__name__
| bsd-3-clause |
timothydmorton/bokeh | bokeh/compat/mplexporter/renderers/vincent_renderer.py | 64 | 1922 | import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
def open_figure(self, fig, props):
self.chart = None
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
def draw_line(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
linedata = {'x': data[:, 0],
'y': data[:, 1]}
line = vincent.Line(linedata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
line.scales['color'].range = [style['color']]
if self.chart is None:
self.chart = line
else:
warnings.warn("Multiple plot elements not yet supported")
def draw_markers(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
markerdata = {'x': data[:, 0],
'y': data[:, 1]}
markers = vincent.Scatter(markerdata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
markers.scales['color'].range = [style['facecolor']]
if self.chart is None:
self.chart = markers
else:
warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
"""Convert a matplotlib figure to a vincent object"""
renderer = VincentRenderer()
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.chart
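# Illustrative sketch of typical usage; it assumes vincent is installed and that
# the returned chart exposes to_json(), as standard vincent visualizations do.
#
#     import matplotlib.pyplot as plt
#
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
#     chart = fig_to_vincent(fig)
#     chart.to_json('chart.json')   # write the Vega spec to disk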
| bsd-3-clause |
orlox/massive_bins_2015 | 2016_ULX/scripts/NSBH/kick_dist.py | 1 | 2053 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib as mpl
from pylab import *
import numpy as np
import sys
sys.path.insert(0, '../')
import kicks
from scipy.stats import maxwell
params = {'backend': 'pdf',
'figure.figsize': [4.3, 2.2],
'font.family':'serif',
'font.size':10,
'font.serif': 'Times Roman',
'axes.titlesize': 'medium',
'axes.labelsize': 'medium',
'legend.fontsize': 8,
'legend.frameon' : False,
'text.usetex': True,
'figure.dpi': 600,
'lines.markersize': 4,
'lines.linewidth': 3,
'lines.antialiased': False,
'path.simplify': False,
'legend.handlelength':3,
'figure.subplot.bottom':0.2,
'figure.subplot.top':0.95,
'figure.subplot.left':0.15,
'figure.subplot.right':0.92}
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
'#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
mpl.rcParams.update(params)
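# Sweep fixed kick velocities from 0 to 700 km/s in steps of 5 km/s; for each
# value, sample_kick_distribution_P appears to return the (merge, disrupt)
# fractions that end up in columns 1 and 2 of A and are plotted below.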
A=np.array([np.append([vkick],kicks.sample_kick_distribution_P(23,5.5,55,1.4,vdist=lambda x:[float(vkick)], num_v=5, num_theta=400,num_phi=100)) for vkick in range(0,701,5)])
print(A)
print(A[:,0])
print(A[:,1])
print(A[:,2])
fig, axes= plt.subplots(1)
maxw = axes.fill_between(A[:,0],0,maxwell.pdf(A[:,0], scale=265.)/max(maxwell.pdf(A[:,0],scale=265.)),color="b", alpha=0.2, label="Maxwellian, $\\sigma=265~\\rm km~s^{-1}$")
merge, = axes.plot(A[:,0],10*A[:,1], color=hexcols[2],label="GW merge fraction $\\times$ 10")
disrupt, = axes.plot(A[:,0],A[:,2], color=hexcols[8],ls="--", label="Disrupt fraction")
axes.set_xlabel("$v_{\\rm kick}~\\rm[km~s^{-1}]$")
axes.set_ylabel("fraction")
#axes.set_xlim([0,50])
axes.set_ylim([0,1.19])
axes.legend([maxw,merge,disrupt],["Maxwellian, $\\sigma=265~\\rm km~s^{-1}$", "GW merge fraction $\\times$ 10", "Disrupt fraction"], loc="upper left", fontsize=7)
plt.savefig("kick_dist.pdf")
#plt.clf()
#plt.close(plt.gcf())
| gpl-3.0 |
PrashntS/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
google-research/google-research | dp_multiq/experiment.py | 1 | 17930 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for running multiquantiles experiments and plotting the results."""
import enum
import functools
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dp_multiq import base
from dp_multiq import csmooth
from dp_multiq import ind_exp
from dp_multiq import joint_exp
from dp_multiq import smooth
from dp_multiq import tree
class ErrorMetric(enum.Enum):
MISCLASSIFIED_POINTS = 1
DISTANCE = 2
_ERROR_FUNCS = {
ErrorMetric.MISCLASSIFIED_POINTS:
base.misclassified_points_error,
ErrorMetric.DISTANCE:
lambda _, true_qs, est_qs: base.distance_error(true_qs, est_qs)
}
_ERROR_LABELS = {
ErrorMetric.MISCLASSIFIED_POINTS: "avg # misclassified points",
ErrorMetric.DISTANCE: "avg distance"
}
class QuantilesEstimationMethod(enum.Enum):
JOINT_EXP = 1
IND_EXP = 2
APP_IND_EXP = 3
SMOOTH = 4
CSMOOTH = 5
LAP_TREE = 6
GAUSS_TREE = 7
_PARTIAL_METHODS = {
QuantilesEstimationMethod.JOINT_EXP: joint_exp.joint_exp,
QuantilesEstimationMethod.IND_EXP: ind_exp.ind_exp,
QuantilesEstimationMethod.APP_IND_EXP: ind_exp.ind_exp,
QuantilesEstimationMethod.SMOOTH: smooth.smooth,
QuantilesEstimationMethod.CSMOOTH: csmooth.csmooth,
QuantilesEstimationMethod.LAP_TREE: tree.tree,
QuantilesEstimationMethod.GAUSS_TREE: tree.tree
}
_PLOT_LABELS = {
QuantilesEstimationMethod.JOINT_EXP: "JointExp",
QuantilesEstimationMethod.IND_EXP: "IndExp",
QuantilesEstimationMethod.APP_IND_EXP: "AppIndExp",
QuantilesEstimationMethod.SMOOTH: "Smooth",
QuantilesEstimationMethod.CSMOOTH: "CSmooth",
QuantilesEstimationMethod.LAP_TREE: "LapTree",
QuantilesEstimationMethod.GAUSS_TREE: "GaussTree"
}
_PLOT_LINESTYLES = {
QuantilesEstimationMethod.JOINT_EXP: "-",
QuantilesEstimationMethod.IND_EXP: "--",
QuantilesEstimationMethod.APP_IND_EXP: "--",
QuantilesEstimationMethod.SMOOTH: "-.",
QuantilesEstimationMethod.CSMOOTH: "-.",
QuantilesEstimationMethod.LAP_TREE: ":",
QuantilesEstimationMethod.GAUSS_TREE: ":"
}
_PLOT_COLORS = {
QuantilesEstimationMethod.JOINT_EXP: "lightseagreen",
QuantilesEstimationMethod.IND_EXP: "mediumpurple",
QuantilesEstimationMethod.APP_IND_EXP: "darkorange",
QuantilesEstimationMethod.SMOOTH: "cornflowerblue",
QuantilesEstimationMethod.CSMOOTH: "violet",
QuantilesEstimationMethod.LAP_TREE: "firebrick",
QuantilesEstimationMethod.GAUSS_TREE: "peru"
}
def synthetic_comparison(methods, error_func, data_type, num_samples, data_low,
data_high, num_trials, num_quantiles_range, eps, delta,
swap, ts_matrix):
"""Returns errors and times from running experients on synthetic data.
Args:
methods: Array of private quantiles algorithms to test.
error_func: Function for computing quantile estimation error.
data_type: Type of synthetic data to use, either uniform or gaussian.
num_samples: Number of samples to use in each trial.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_trials: Number of trials to average over.
num_quantiles_range: Array of numbers of quantiles to estimate.
eps: Privacy parameter epsilon.
    delta: Privacy parameter delta, used by AppIndExp, Smooth, and GaussTree.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
ts_matrix: Matrix of smooth sensitivity parameters passed to CSmooth, where
ts_matrix[i,j] corresponds to quantile j+1 of num_quantiles_range[i]
quantiles.
Returns:
    Arrays errors and times storing, respectively, average error (as measured
    by error_func) and time in seconds for each method in methods and each
    num_quantiles in num_quantiles_range, for the specified synthetic data.
"""
max_num_quantiles = len(num_quantiles_range)
num_methods = len(methods)
errors = np.zeros((num_methods, max_num_quantiles))
times = np.zeros((num_methods, max_num_quantiles))
for num_quantiles_idx in range(max_num_quantiles):
num_quantiles = num_quantiles_range[num_quantiles_idx]
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]
ts = ts_matrix[num_quantiles_idx]
errors[:, num_quantiles_idx], times[:, num_quantiles_idx] = comparison(
methods, error_func, np.empty(0), data_type, num_samples, data_low,
data_high, num_trials, qs, eps, delta, swap, ts)
print("Finished num_quantiles = " + str(num_quantiles))
return errors, times
def real_comparison(methods, error_func, data_type, num_samples, data_low,
data_high, num_trials, num_quantiles_range, eps, delta,
swap, ts_matrix):
"""Returns errors and times from running experiments on real data.
Args:
methods: Array of private quantiles algorithms to test.
error_func: Function for computing quantile estimation error.
data_type: Type of real data to use, either ratings or pages.
num_samples: Number of samples to use in each trial.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_trials: Number of trials to average over.
num_quantiles_range: Array of number of quantiles to estimate.
eps: Privacy parameter epsilon.
    delta: Privacy parameter delta, used by AppIndExp, Smooth, and GaussTree.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
ts_matrix: Matrix of smooth sensitivity parameters passed to CSmooth, where
ts_matrix[i,j] corresponds to quantile j+1 of num_quantiles_range[i]
quantiles.
Returns:
    Arrays errors and times storing, respectively, average error (as measured
    by error_func) and time in seconds for each method in methods and each
    num_quantiles in num_quantiles_range, for the specified real data.
"""
max_num_quantiles = len(num_quantiles_range)
num_methods = len(methods)
errors = np.zeros((num_methods, max_num_quantiles))
times = np.zeros((num_methods, max_num_quantiles))
if data_type == "ratings":
data = pd.read_csv("books.csv", usecols=["average_rating"])
data = pd.to_numeric(data["average_rating"], errors="coerce").to_numpy()
data = data[~np.isnan(data)]
else:
data = pd.read_csv("books.csv", usecols=[" num_pages"])
data = pd.to_numeric(data[" num_pages"], errors="coerce").to_numpy()
data = data[~np.isnan(data)]
data = data / 100
for num_quantiles_idx in range(max_num_quantiles):
num_quantiles = num_quantiles_range[num_quantiles_idx]
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]
ts = ts_matrix[num_quantiles_idx]
errors[:, num_quantiles_idx], times[:, num_quantiles_idx] = comparison(
methods, error_func, data, "", num_samples, data_low, data_high,
num_trials, qs, eps, delta, swap, ts)
print("Finished num_quantiles = " + str(num_quantiles))
return errors, times
def comparison(methods, error_func, fixed_data, distribution, num_samples,
data_low, data_high, num_trials, qs, eps, delta, swap, ts):
"""Helper function to run the trials set up by synthetic/real_comparison.
Args:
methods: Array of private quantiles algorithms to test.
error_func: Function for computing quantile estimation error.
fixed_data: In the case of real data, an array of data to subsample in each
trial. In the case of synthetic data, an empty array.
distribution: In the case of real data, an empty string. In the case of
synthetic data, either "gaussian" or "uniform".
num_samples: Number of samples to use in each trial.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_trials: Number of trials to average over.
qs: Array of quantiles to estimate.
eps: Privacy parameter epsilon.
    delta: Privacy parameter delta, used by AppIndExp, Smooth, and GaussTree.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
    ts: Array of smooth sensitivity parameters passed to CSmooth, one per
      quantile.
Returns:
    Arrays errors and times storing, respectively, average error (as measured
    by error_func) and time in seconds for each of the methods.
  Raises:
ValueError if the Smooth or CSmooth method is used in conjunction with
swap=False, or if one of the specified methods is unrecognized.
"""
# Create an array of DP quantile functions from the array of method names.
quant_funcs = []
for method in methods:
quant_func = functools.partial(
_PARTIAL_METHODS[method], data_low=data_low, data_high=data_high, qs=qs)
if method == QuantilesEstimationMethod.JOINT_EXP:
quant_func = functools.partial(quant_func, eps=eps, swap=swap)
elif method == QuantilesEstimationMethod.IND_EXP:
quant_func = functools.partial(
quant_func, divided_eps=eps / len(qs), swap=swap)
elif method == QuantilesEstimationMethod.APP_IND_EXP:
quant_func = functools.partial(
quant_func,
divided_eps=ind_exp.opt_comp_calculator(eps, delta, len(qs)),
swap=swap)
elif method == QuantilesEstimationMethod.SMOOTH:
if not swap:
raise ValueError("Smooth method is only implemented for swap DP.")
quant_func = functools.partial(
quant_func, divided_eps=eps / len(qs), divided_delta=delta / len(qs))
elif method == QuantilesEstimationMethod.CSMOOTH:
if not swap:
raise ValueError("CSmooth method is only implemented for swap DP.")
quant_func = functools.partial(
quant_func, divided_eps=eps / np.sqrt(len(qs)), ts=ts)
elif method == QuantilesEstimationMethod.LAP_TREE:
quant_func = functools.partial(quant_func, eps=eps, delta=0, swap=swap)
elif method == QuantilesEstimationMethod.GAUSS_TREE:
quant_func = functools.partial(
quant_func, eps=eps, delta=delta, swap=swap)
else:
raise ValueError("Unrecognized method name: {}".format(method))
quant_funcs.append(quant_func)
num_methods = len(methods)
if len(quant_funcs) != num_methods:
raise ValueError(
"Quantile functions array length does not match methods array length.")
errors = np.zeros(num_methods)
times = np.zeros(num_methods)
for _ in range(num_trials):
# Sample a dataset.
if fixed_data.size > 0:
sampled_data = np.sort(
np.random.choice(fixed_data, num_samples, replace=False))
elif distribution == "gaussian":
sampled_data = base.gen_gaussian(num_samples, 0, 5)
elif distribution == "uniform":
sampled_data = base.gen_uniform(num_samples, -5, 5)
true_quantiles = base.quantiles(sampled_data, qs)
for method_num in range(num_methods):
quant_func = quant_funcs[method_num]
begin = time.time()
estimates = quant_func(sampled_data)
end = time.time()
      times[method_num] += (end - begin) / num_trials
errors[method_num] += error_func(sampled_data, true_quantiles,
estimates) / num_trials
return errors, times
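# Illustrative usage (not part of the original module): a single synthetic run
# of `comparison` with JointExp; the parameter values below are assumptions
# chosen only for demonstration.
#
#   errs, secs = comparison(
#       methods=[QuantilesEstimationMethod.JOINT_EXP],
#       error_func=_ERROR_FUNCS[ErrorMetric.DISTANCE],
#       fixed_data=np.empty(0), distribution="gaussian", num_samples=1000,
#       data_low=-100, data_high=100, num_trials=5,
#       qs=np.linspace(0, 1, 7)[1:-1], eps=1.0, delta=1e-6, swap=True,
#       ts=np.empty(5))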
def tune_ts_plot(eps, avg_ts, num_quantiles_range, file_name):
"""Shows the specified plot of tuned t parameters for CSmooth.
Args:
eps: Privacy parameter epsilon.
avg_ts: Array of arrays of selected ts, one array for each number of number
of quantiles.
num_quantiles_range: Array of number of quantiles used to tune.
file_name: File name for saving plot.
Returns:
Saves the specified plot as file_name.png.
"""
for num_quantiles_idx in range(len(num_quantiles_range)):
num_quantiles = num_quantiles_range[num_quantiles_idx]
plt.scatter(
np.linspace(0, 1, num_quantiles + 2)[1:-1],
avg_ts[num_quantiles_idx],
label=str(num_quantiles))
plt.title("tuned t per quantile range, eps = " + str(eps))
plt.ylabel("tuned t")
plt.xlabel("quantile")
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
plt.savefig(file_name + ".png")
plt.close()
def plot(methods, y_label, title, x_array, y_arrays, log_scale, legend,
plot_name):
"""Constructs and saves the specified plot as plot_name.png.
Args:
methods: Array of private quantiles algorithms to test.
y_label: Label for plot's y-axis.
title: Title to display at the top of the plot.
x_array: Array of quantiles to use for the x-axis.
y_arrays: len(methods) x len(x_array) array of points to plot.
log_scale: If true, scales y-axis logarithmically.
legend: If true, displays legend.
plot_name: File name to use for saving plot.
  Raises:
    ValueError: If the dimensions of y_arrays do not match len(methods) and
      len(x_array).
"""
num_methods = len(methods)
if num_methods != y_arrays.shape[0]:
raise ValueError(
"Length of methods does not match first dimension of y_arrays.")
if len(x_array) != y_arrays.shape[1]:
raise ValueError(
"Length of x_array does not match second dimension of y_arrays.")
for index in range(num_methods):
y_array = y_arrays[index]
method = methods[index]
plt.plot(
x_array,
y_array,
linestyle=_PLOT_LINESTYLES[method],
label=_PLOT_LABELS[method],
color=_PLOT_COLORS[method],
linewidth=3)
plt.title(title, fontsize=18)
plt.ylabel(y_label, fontsize=18)
if log_scale:
plt.yscale("log")
plt.xlabel("# quantiles", fontsize=18)
if legend:
legend = plt.legend(
loc="lower center",
bbox_to_anchor=(0.5, -0.5),
ncol=3,
frameon=False,
fontsize=16)
plt.savefig(
plot_name + ".png", bbox_extra_artists=(legend,), bbox_inches="tight")
else:
plt.savefig(plot_name + ".png", bbox_inches="tight")
plt.close()
def experiment(methods,
error_metric=ErrorMetric.MISCLASSIFIED_POINTS,
data_low=-100,
data_high=100,
num_samples=1000,
eps=1,
delta=1e-6,
swap=True,
num_quantiles_range=range(1, 30),
est_num_trials=10,
ts_num_trials=2,
ts_plot_name="eps_1_ts",
error_plot_prefix="eps_1_error",
time_plot_prefix="eps_1_time"):
"""Runs trials and saves relevant plots for the specified experiment.
Args:
methods: Array of private quantiles algorithms to test. Available methods
are defined in the QuantilesEstimationMethod enum.
error_metric: Available metrics are defined in the ErrorMetric enum.
data_low: Lower bound for data, used by private quantiles algorithms.
data_high: Upper bound for data, used by private quantiles algorithms.
num_samples: Number of samples for each trial.
eps: Privacy parameter epsilon.
delta: Privacy parameter delta.
swap: If true, uses swap privacy definition. Otherwise uses add-remove.
num_quantiles_range: Array of numbers of quantiles to estimate.
est_num_trials: Number of trials to average for error and time experiments.
ts_num_trials: Number of trials to average for tuning ts experiments.
ts_plot_name: Name for saving the tuning ts plot.
error_plot_prefix: File prefix for the error plots. For example, the
Gaussian error plot will have name error_plot_prefix_gaussian.
time_plot_prefix: File prefix for the time plots.
Returns:
Saves the generated ts/error/time plots.
"""
if QuantilesEstimationMethod.CSMOOTH in methods:
tuned_ts = csmooth.csmooth_tune_t_experiment(eps, num_samples,
ts_num_trials,
num_quantiles_range, data_low,
data_high, -2, 0, 50)
tune_ts_plot(eps, tuned_ts, num_quantiles_range, ts_plot_name)
print("Finished tuning t for CSmooth.")
else:
tuned_ts = [
np.empty(num_quantiles) for num_quantiles in num_quantiles_range
]
error_func = _ERROR_FUNCS[error_metric]
error_label = _ERROR_LABELS[error_metric]
for data in ["gaussian", "uniform"]:
errors, times = synthetic_comparison(methods, error_func, data, num_samples,
data_low, data_high, est_num_trials,
num_quantiles_range, eps, delta, swap,
tuned_ts)
plot(methods, error_label, data + " error", num_quantiles_range, errors,
True, True, error_plot_prefix + data)
plot(methods, "time (secs)", data + " time", num_quantiles_range, times,
True, True, time_plot_prefix + data)
print("Finished " + data + " trials.")
for data in ["ratings", "pages"]:
errors, times = real_comparison(methods, error_func, data, num_samples,
data_low, data_high, est_num_trials,
num_quantiles_range, eps, delta, swap,
tuned_ts)
plot(methods, error_label, data + " error", num_quantiles_range, errors,
True, True, error_plot_prefix + data)
plot(methods, "time (secs)", data + " time", num_quantiles_range, times,
True, True, time_plot_prefix + data)
print("Finished " + data + " trials.")
| apache-2.0 |
shaunwbell/FOCI_Analysis | ReanalysisRetreival_orig/UnimakPass/UP_HRV2OISST_model_prep.py | 1 | 8998 | #!/usr/bin/env python
"""
UP_HRV2OISST_model_prep.py
DataSource: ftp://ftp.cdc.noaa.gov/Datasets/noaa.oisst.v2.highres/
NOAA High Resolution SST data provided by the NOAA/OAR/ESRL PSD,
Boulder, Colorado, USA, from their Web site at http://www.esrl.noaa.gov/psd/
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
from netCDF4 import date2num
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = '[email protected]'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'OISSTv2', 'Unimak', 'Shumagin', 'SST anomaly', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC_SST(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=0. )
ncinstance.add_data('T_25', data[0])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" days since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
def pythondate2str(pdate):
(year,month,day) = datetime.datetime.fromordinal(int(pdate)).strftime('%Y-%b-%d').split('-')
delta_t = pdate - int(pdate)
dhour = str(int(np.floor(24 * (delta_t))))
dmin = str(int(np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))
dsec = str(int(np.floor(60 * ((60 * ((24 * (delta_t)) - np.floor(24 * (delta_t)))) - \
np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))))
#add zeros to time
if len(dhour) == 1:
dhour = '0' + dhour
if len(dmin) == 1:
dmin = '0' + dmin
if len(dsec) == 1:
dsec = '0' + dsec
return year + '-' + month + '-' + day + ' ' + dhour+':'+dmin+':'+dsec
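# Illustrative note (not in the original script): date2pydate and pydate2EPIC
# are inverses up to millisecond rounding, e.g. for an arbitrary NARR time
# stamp (days since 1800-01-01):
#
#   pydate = date2pydate(80505., file_flag='NARR')
#   time1, time2 = pydate2EPIC(pydate)
#   # date2pydate(time1, time2, file_flag='EPIC') recovers pydate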
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '../data/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/in_and_outbox/data_sets/reanalyis_data/OISSTV2/'
infile = [NARR + 'sst.day.anom.2019.v2.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
#stn ['1','2']
station_name = ['ShumaginDown']
sta_lat = [54.5]
sta_long = [161]
#Find NCEP nearest point to moorings - haversine formula
# NCEP data is 0->360 (positive east), Moorings are usually expressed +W for FOCI
stn1_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '1d')
stn1_modelpt = [lat_lon['lat'][stn1_pt[3]],lat_lon['lon'][stn1_pt[4]]]
print "stn1 nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], stn1_modelpt[0], stn1_modelpt[1])
stn1_modelpt[1] = -1.*((180 - stn1_modelpt[1]) + 180)
print "thus converting lon to degrees W positive {0}".format(stn1_modelpt[1])
#loop over all requested data
years = range(2016,2019)
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'sst.day.anom.'+ str(yy) + '.v2.nc'
print "Working on file " + infile
stn1_data = from_netcdf_1dsplice(infile, None, stn1_pt[3], stn1_pt[4])
stn1_sst = stn1_data['anom']
#convert to EPIC time
pydate = date2pydate(stn1_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NOAA_OI_SST_V2_anom_stn1_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], stn1_modelpt, [stn1_sst,])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=52, \
urcrnrlat=58,llcrnrlon=-165,urcrnrlon=-155, lat_ts=45)
# Mooring Data
x_moor, y_moor = m(-1. * sta_long[0],sta_lat[0])
x_close, y_close = m(stn1_modelpt[1], stn1_modelpt[0])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
    CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(50,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-165,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/shumigans_region.png', bbox_inches='tight', dpi = (100))
plt.close()
| mit |