repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sinhrks/scikit-learn | sklearn/preprocessing/tests/test_label.py | 12 | 17807 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
danoan/image-processing | denoise.py | 1 | 3062 | import os,sys
PROJECT_FOLDER=os.path.dirname(os.path.realpath(__file__))
sys.path.append( "{}/packages".format(PROJECT_FOLDER) )
import argparse
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
from improc.denoise import chambolle,rof,tikhonov,fista,rof_modified_curvature
def read_input():
parser = argparse.ArgumentParser(description="Denoising tools")
parser.add_argument("input_image",type=str,action="store",help="Image to be denoised.")
parser.add_argument("algorithm",type=str,action="store", help="Choose among {tikhonov, rof, chambolle, fista}.")
parser.add_argument("-l",dest="lbda",type=float,action="store",default=0.12,help="Regularization weight.")
parser.add_argument("-t",dest="tolerance",type=float,action="store",default=1e-5,help="Stop if the new energy value differs of less than [tolerance].")
parser.add_argument("-e",dest="ev_stop",type=float,action="store",default=None,help="Stop if energy reaches this value.")
parser.add_argument("-i",dest="max_iterations",type=int,action="store",default=100,help="Stop after the i-th iteration.")
parser.add_argument("-o",dest="output_image",type=str,action="store",help="Output image filepath.")
parser.add_argument("-v",dest="verbose",action="store_true",help="Print algorithms outputs.")
args = parser.parse_args()
return args
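# Illustrative invocation (hypothetical file names; parameter values are only
# examples, not recommendations):
#
#   python denoise.py noisy.png rof -l 0.12 -t 1e-5 -i 200 -o denoised.png -v
#
# This runs the ROF algorithm with regularization weight 0.12 and stops after
# 200 iterations or once the energy changes by less than 1e-5.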
def make3channel(img):
if len(img.shape)==2:
_img = np.zeros( img.shape + (3,) )
for c in range(3):
_img[:,:,c] = img.copy()
img = _img
return img
def set_plot_no_axes():
plt.gca().set_axis_off()
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
hspace = 0, wspace = 0)
plt.margins(0,0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
def main():
inp = read_input()
noisy_img = np.asfarray( misc.imread(inp.input_image) )
noisy_img /= 255.0
noisy_img = make3channel(noisy_img)
if inp.max_iterations<0:
inp.max_iterations=1e10
if inp.algorithm=="chambolle":
dimg = chambolle.denoise_image(noisy_img, inp.lbda, inp.tolerance, inp.max_iterations,inp.verbose)
elif inp.algorithm=="rof":
dimg = rof.denoise_image(noisy_img, inp.lbda, inp.tolerance, inp.max_iterations, inp.verbose,inp.ev_stop)
elif inp.algorithm=="fista":
dimg = fista.denoise_image(noisy_img, inp.lbda, inp.max_iterations)
elif inp.algorithm=="tikhonov":
dimg = tikhonov.denoise_image(noisy_img, inp.lbda, inp.max_iterations,inp.verbose)
elif inp.algorithm=="rof_curvature":
dimg = rof_modified_curvature.denoise_image(noisy_img, inp.lbda,inp.tolerance, inp.max_iterations,inp.verbose,inp.ev_stop)
if inp.output_image is not None:
dirname = os.path.dirname(inp.output_image)
if not os.path.exists(os.path.dirname(inp.output_image)):
os.makedirs(dirname)
set_plot_no_axes()
plt.imshow(dimg)
plt.savefig(inp.output_image,bbox_inches = 'tight',pad_inches = 0)
else:
fig,axs=plt.subplots(1,2)
axNoisy,axDenoise = axs
axNoisy.imshow(noisy_img)
axDenoise.imshow(dimg)
plt.show()
if __name__=="__main__":
main()
| mit |
nrhine1/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
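# Added note: 'uniform' is the constant-weights interpolation mentioned in the
# docstring, while 'distance' weights each neighbor by the inverse of its
# distance (the "barycenter" interpolation).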
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
victorbergelin/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
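# Minimal usage sketch, added for illustration (the bandwidth and query points
# below are arbitrary, not recommendations): KernelDensity is fit on an
# (n_samples, n_features) array and score_samples() returns the log of the
# estimated density at the query points, so np.exp() recovers the density.
_sketch_X = np.array([[-1.0], [0.0], [0.2], [1.0]])
_sketch_kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(_sketch_X)
_sketch_density = np.exp(_sketch_kde.score_samples(np.array([[0.0], [2.0]])))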
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
BhallaLab/moose-full | moose-examples/snippets/MULTI/minchan.py | 3 | 12176 | # minimal.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# Minimal model for loading rdesigneur: reac-diff elec signaling in neurons
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_K_AHP()
proto18.make_K_C()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
proto18.make_glu()
proto18.make_NMDA()
proto18.make_Ca_NMDA()
proto18.make_NMDA_Ca_conc()
proto18.make_axon()
moose.setCwe( '/library' )
model = moose.Neutral( '/model' )
cellId = moose.loadModel( 'mincell2.p', '/model/elec', "Neutral" )
return cellId
def loadChem( diffLength ):
chem = moose.Neutral( '/model/chem' )
neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
neuroCompt.separateSpines = 1
neuroCompt.geometryPolicy = 'cylinder'
spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
neuroCompt.name = 'dend'
spineCompt.name = 'spine'
psdCompt.name = 'psd'
def makeNeuroMeshModel():
diffLength = 6e-6 # Aim for 2 soma compartments.
elec = loadElec()
loadChem( diffLength )
neuroCompt = moose.element( '/model/chem/dend' )
neuroCompt.diffLength = diffLength
neuroCompt.cellPortion( elec, '/model/elec/#' )
for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if (x.diffConst > 0):
x.diffConst = 1e-11
for x in moose.wildcardFind( '/model/chem/##/Ca' ):
x.diffConst = 1e-10
# Put in dend solvers
ns = neuroCompt.numSegments
ndc = neuroCompt.numDiffCompts
print 'ns = ', ns, ', ndc = ', ndc
assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
assert( ns == 1 ) # soma/dend only
assert( ndc == 2 ) # split into 2.
nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
nmstoich.compartment = neuroCompt
nmstoich.ksolve = nmksolve
nmstoich.dsolve = nmdsolve
nmstoich.path = "/model/chem/dend/##"
print 'done setting path, numPools = ', nmdsolve.numPools
assert( nmdsolve.numPools == 1 )
assert( nmdsolve.numAllVoxels == 2 )
assert( nmstoich.numAllPools == 1 )
# oddly, numLocalFields does not work.
ca = moose.element( '/model/chem/dend/DEND/Ca' )
assert( ca.numData == ndc )
# Put in spine solvers. Note that these get info from the neuroCompt
spineCompt = moose.element( '/model/chem/spine' )
sdc = spineCompt.mesh.num
print 'sdc = ', sdc
assert( sdc == 1 )
smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
smstoich = moose.Stoich( '/model/chem/spine/stoich' )
smstoich.compartment = spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = "/model/chem/spine/##"
assert( smstoich.numAllPools == 3 )
assert( smdsolve.numPools == 3 )
assert( smdsolve.numAllVoxels == 1 )
# Put in PSD solvers. Note that these get info from the neuroCompt
psdCompt = moose.element( '/model/chem/psd' )
pdc = psdCompt.mesh.num
assert( pdc == 1 )
pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
pmstoich.compartment = psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = "/model/chem/psd/##"
assert( pmstoich.numAllPools == 3 )
assert( pmdsolve.numPools == 3 )
assert( pmdsolve.numAllVoxels == 1 )
foo = moose.element( '/model/chem/psd/Ca' )
print 'PSD: numfoo = ', foo.numData
print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
# Put in junctions between the diffusion solvers
nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
"""
CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
"""
# set up adaptors
aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', ndc )
adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( adaptCa ) == ndc )
assert( len( chemCa ) == ndc )
path = '/model/elec/soma/Ca_conc'
elecCa = moose.element( path )
print "=========="
print elecCa
print adaptCa
print chemCa
moose.connect( elecCa, 'concOut', adaptCa[0], 'input', 'Single' )
moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 1e-3 # 520 to 0.0052 mM
#print adaptCa.outputOffset
#print adaptCa.scale
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeElecPlots():
graphs = moose.Neutral( '/graphs' )
elec = moose.Neutral( '/graphs/elec' )
addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
addPlot( '/model/elec/spine_head', 'getVm', 'elec/spineVm' )
addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' )
def makeChemPlots():
graphs = moose.Neutral( '/graphs' )
chem = moose.Neutral( '/graphs/chem' )
addPlot( '/model/chem/psd/Ca_CaM', 'getConc', 'chem/psdCaCam' )
addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
addPlot( '/model/chem/spine/Ca_CaM', 'getConc', 'chem/spineCaCam' )
addPlot( '/model/chem/spine/Ca', 'getConc', 'chem/spineCa' )
addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
def testNeuroMeshMultiscale():
elecDt = 50e-6
chemDt = 0.01
ePlotDt = 0.5e-3
cPlotDt = 0.01
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
"""
for i in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if ( i[0].diffConst > 0 ):
grandpaname = i.parent[0].parent.name + '/'
paname = i.parent[0].name + '/'
print grandpaname + paname + i[0].name, i[0].diffConst
print 'Neighbors:'
for t in moose.element( '/model/chem/spine/ksolve/junction' ).neighbors['masterJunction']:
print 'masterJunction <-', t.path
for t in moose.wildcardFind( '/model/chem/#/ksolve' ):
k = moose.element( t[0] )
print k.path + ' localVoxels=', k.numLocalVoxels, ', allVoxels= ', k.numAllVoxels
"""
'''
moose.useClock( 4, '/model/chem/dend/dsolve', 'process' )
moose.useClock( 5, '/model/chem/dend/ksolve', 'process' )
moose.useClock( 5, '/model/chem/spine/ksolve', 'process' )
moose.useClock( 5, '/model/chem/psd/ksolve', 'process' )
'''
makeChemPlots()
makeElecPlots()
moose.setClock( 0, elecDt )
moose.setClock( 1, elecDt )
moose.setClock( 2, elecDt )
moose.setClock( 4, chemDt )
moose.setClock( 5, chemDt )
moose.setClock( 6, chemDt )
moose.setClock( 7, cPlotDt )
moose.setClock( 8, ePlotDt )
moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
#moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )
moose.useClock( 4, '/model/chem/#/dsolve', 'process' )
moose.useClock( 5, '/model/chem/#/ksolve', 'process' )
moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )
moose.useClock( 7, '/graphs/chem/#', 'process' )
moose.useClock( 8, '/graphs/elec/#', 'process' )
#hsolve = moose.HSolve( '/model/elec/hsolve' )
#moose.useClock( 1, '/model/elec/hsolve', 'process' )
#hsolve.dt = elecDt
#hsolve.target = '/model/elec/compt'
#moose.reinit()
moose.element( '/model/elec/spine_head' ).inject = 5e-12
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
"""
print 'pre'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'dend'
eca = moose.vec( '/model/chem/dend/DEND/Ca' )
#for i in ( 0, 1, 2, 30, 60, 90, 120, 144 ):
for i in range( 13 ):
print i, eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'PSD'
eca = moose.vec( '/model/chem/psd/PSD/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
print 'spine'
eca = moose.vec( '/model/chem/spine/SPINE/CaM/Ca' )
for i in range( 3 ):
print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
"""
moose.start( 0.5 )
plt.ion()
fig = plt.figure( figsize=(8,8) )
chem = fig.add_subplot( 211 )
chem.set_ylim( 0, 0.004 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = chem.plot( pos, x.vector, label=x.name )
plt.legend()
elec = fig.add_subplot( 212 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = elec.plot( pos, x.vector, label=x.name )
plt.legend()
fig.canvas.draw()
raw_input()
'''
for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
t = numpy.arange( 0, x.vector.size, 1 )
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
'''
plt.show()
print 'All done'
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
| gpl-2.0 |
lehinevych/Dato-Core | src/unity/python/graphlab/deps/__init__.py | 13 | 1294 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from distutils.version import StrictVersion
import logging
def __get_version(version):
if 'dev' in str(version):
version = version[:version.find('.dev')]
return StrictVersion(version)
HAS_PANDAS = True
PANDAS_MIN_VERSION = '0.13.0'
try:
import pandas
if __get_version(pandas.__version__) < StrictVersion(PANDAS_MIN_VERSION):
HAS_PANDAS = False
logging.warn(('Pandas version %s is not supported. Minimum required version: %s. '
'Pandas support will be disabled.')
% (pandas.__version__, PANDAS_MIN_VERSION) )
except:
HAS_PANDAS = False
import pandas_mock as pandas
HAS_NUMPY = True
NUMPY_MIN_VERSION = '1.8.0'
try:
import numpy
if __get_version(numpy.__version__) < StrictVersion(NUMPY_MIN_VERSION):
HAS_NUMPY = False
logging.warn(('Numpy version %s is not supported. Minimum required version: %s. '
'Numpy support will be disabled.')
% (numpy.__version__, NUMPY_MIN_VERSION) )
except:
HAS_NUMPY = False
import numpy_mock as numpy
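# Typical downstream usage (illustrative sketch only; the import path is assumed
# from this file's location in the package and is not verified here):
#
#   from graphlab.deps import HAS_PANDAS, pandas
#   if HAS_PANDAS:
#       frame = pandas.DataFrame({'x': [1, 2, 3]})
#   else:
#       pass  # fall back to a non-pandas code path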
| agpl-3.0 |
perrygeo/pydem | pydem/reader/my_types.py | 3 | 21075 | """
Copyright 2015 Creare
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from numpy.linalg import inv
from traits.api import *
import numpy as np
import gdal
import gdalconst
import matplotlib, matplotlib.cm
NO_DATA_VALUE = -9999
d_name_to_wkt = {'WGS84' : r'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]',
'NAD83' : r'GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.2572221010002,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4269"]]',
}
d_name_to_epsg = {'WGS84' : 4326,
'NAD83': 4269
}
d_wkt_to_name = {v:k for k, v in d_name_to_wkt.iteritems()}
d_wkt_to_name[r'GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.2572221010002,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4269"]]'] = 'NAD83'
d_wkt_to_name[r'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]'] = 'WGS84' # afghanistan dem
d_wkt_to_name[r'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS84",6378137,298.2572235604902,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]]'] = 'WGS84'
d_wkt_to_name[r'GEOGCS["WGS 84",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'] = 'WGS84'
d_epsg_to_name = {4326: 'WGS84',
4269: 'NAD83',
}
# This trait maps a user-friendly name (e.g., WGS84) to an official WKT.
projection_wkt_trait = Trait('WGS84',
d_name_to_wkt)
class Point(HasStrictTraits):
"""
Point(Lat, Lon)
A simple convenience class to deal with latitude and longitude, which
can be error-prone to convert otherwise.
Examples
--------
>>> Point(-1.426667, -20.583611)
<Lat,Lon: -1.427, -20.584>
>>> Point((1, 25, 36), (20, 35, 1), name="Site A")
<Site A: 1.427, 20.584>
>>> p = Point(-1.426667, -20.583611)
>>> p.lat
-1.426667
>>> p.lat_dms
(1.0, 25.0, 36.001199999999599)
>>> p.lat_dms = (1.0, 2.0, 3.0)
>>> p
<Lat,Lon: 1.034, -20.584>
Conventions
-----------
Following ISO 6709,
* LAT, LON
* North (lat) and East (lon) are positive.
* South (lat) and West (lon) are negative.
* Decimal representation is preferred; Sexagesimal (base-60) is allowed.
"""
name = Str()
wkt = projection_wkt_trait
lon = Float()
lon_dms = Property(Tuple((Float, Float, Float)))
def _set_lon_dms(self, dms):
deg, min, sec = dms
self.lon = np.sign(deg) * (abs(deg) + min / 60.0 + sec / 3600.0)
def _get_lon_dms(self):
deg = np.floor(abs(self.lon))
min = np.floor((abs(self.lon) - deg) * 60.0)
sec = np.round((abs(self.lon) - deg - min / 60.0) * 3600.0,
4) # round to 4 decimal places.
return (np.sign(self.lon) * deg, min, sec)
lat = Float()
lat_dms = Property(Tuple((Float, Float, Float)))
def _set_lat_dms(self, dms):
deg, min, sec = dms
self.lat = np.sign(deg) * (abs(deg) + min / 60.0 + sec / 3600.0)
def _get_lat_dms(self):
deg = np.floor(abs(self.lat))
min = np.floor((abs(self.lat) - deg) * 60.0)
sec = np.round((abs(self.lat) - deg - min / 60.0) * 3600.0,
4) # round to 4 decimal places.
return (np.sign(self.lat) * deg, min, sec)
def to_wkt(self, target_wkt):
# If we're going from WGS84 -> Spherical Mercator, use PyProj, because
# there seems to be a bug in OGR that gives us an offset. (GDAL
# does fine, though.
if target_wkt == self.wkt:
return self
import osr
dstSpatialRef = osr.SpatialReference()
dstSpatialRef.ImportFromEPSG(d_name_to_epsg[target_wkt])
# dstSpatialRef.ImportFromWkt(d_name_to_wkt[target_wkt])
srcSpatialRef = osr.SpatialReference()
srcSpatialRef.ImportFromEPSG(d_name_to_epsg[self.wkt])
# srcSpatialRef.ImportFromWkt(self.wkt_)
coordTransform = osr.CoordinateTransformation(srcSpatialRef, dstSpatialRef)
a, b, c = coordTransform.TransformPoint(self.lon, self.lat)
return Point(b, a, wkt=target_wkt)
def __str__(self):
return "<%s (%s): %02.3f, %03.3f>" % (self.name if self.name else 'Lat,Lon',
self.wkt,
self.lat, self.lon)
def __repr__(self):
s = ("Point(%02.3f, %02.3f, wkt='%s'" %
(self.lat, self.lon, self.wkt))
if self.name:
s += ", name='%s'" % self.name
s += ")"
return s
def __init__(self, lat=None, lon=None, **kwargs):
HasStrictTraits.__init__(self, **kwargs)
if lon is not None:
try:
self.lon = lon # float
except:
self.lon_dms = lon # tuple
if lat is not None:
try:
self.lat = lat # float
except:
self.lat_dms = lat # tuple
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size):
''' Points are the outer edges of the UL and LR pixels. Size is rows, columns.
GC projection type is taken from Points. '''
assert upper_left_corner.wkt == lower_right_corner.wkt
geotransform = np.array([upper_left_corner.lon, -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]), 0,
upper_left_corner.lat, 0, -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])])
return GridCoordinates(geotransform=geotransform,
wkt=upper_left_corner.wkt,
y_size=size[0],
x_size=size[1])
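# Hedged usage sketch (the coordinate values are made up for illustration): a
# one-degree square tile from (41N, 106W) down to (40N, 105W), rasterized onto
# a 100 x 100 grid, with the projection taken from the corner points.
#
#   ulc = Point(41.0, -106.0, wkt='WGS84')
#   lrc = Point(40.0, -105.0, wkt='WGS84')
#   gc = grid_coords_from_corners(ulc, lrc, (100, 100))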
class GridCoordinates(HasStrictTraits):
"""
Defines mapping of input layers to real-world time and space.
"""
date = Date()
time = Time()
geotransform = Array('float', [6],)
wkt = projection_wkt_trait
x_size = Int()
y_size = Int()
x_axis = Property(Array(), depends_on='geotransform, x_size')
y_axis = Property(Array(), depends_on='geotransform, y_size')
ULC = Property(Instance(Point), depends_on='x_axis, y_axis')
URC = Property(Instance(Point), depends_on='x_axis, y_axis')
LLC = Property(Instance(Point), depends_on='x_axis, y_axis')
LRC = Property(Instance(Point), depends_on='x_axis, y_axis')
def _get_ULC(self): return Point(self.geotransform[3], self.geotransform[0], wkt=self.wkt)
def _get_URC(self): return Point(self.geotransform[3], self.geotransform[0] + self.geotransform[1] * self.x_size, wkt=self.wkt)
def _get_LLC(self): return Point(self.geotransform[3] + self.geotransform[5] * self.y_size, self.geotransform[0], wkt=self.wkt)
def _get_LRC(self): return Point(self.geotransform[3] + self.geotransform[5] * self.y_size, self.geotransform[0] + self.geotransform[1] * self.x_size, wkt=self.wkt)
projection_wkt = Property(Str) # For backwards compatibility w/ previously pickled layers.
def _set_projection_wkt(self, val):
self.wkt = d_wkt_to_name[val]
def __repr__(self):
return '<GridCoordinates: %s -> %s, %d x %d>' % (
self.ULC, self.LRC, self.y_size, self.x_size)
def intersects(self, other_grid_coordinates):
""" returns True if the GC's overlap. """
ogc = other_grid_coordinates # alias
# for explanation: http://stackoverflow.com/questions/306316/determine-if-two-rectangles-overlap-each-other
# Note the flipped y-coord in this coord system.
ax1, ay1, ax2, ay2 = self.ULC.lon, self.ULC.lat, self.LRC.lon, self.LRC.lat
bx1, by1, bx2, by2 = ogc.ULC.lon, ogc.ULC.lat, ogc.LRC.lon, ogc.LRC.lat
if ((ax1 <= bx2) and (ax2 >= bx1) and (ay1 >= by2) and (ay2 <= by1)):
return True
else:
return False
def unique_str(self):
""" A string that (ideally) uniquely represents this GC object. This
helps with naming files for caching. 'Unique' is defined as 'If
GC1 != GC2, then GC1.unique_str() != GC2.unique_str()'; conversely,
'If GC1 == GC2, then GC1.unique_str() == GC2.unique_str()'.
The string should be filename-safe (no \/:*?"<>|).
..note::Because of length/readability restrictions, this fxn ignores
wkt.
Example output:
"-180.000_0.250_0.000_90.000_0.000_-0.251_512_612_2013-05-21_12_32_52.945000"
"""
unique_str = "_".join(["%.3f" % f for f in self.geotransform] +
["%d" % d for d in self.x_size, self.y_size]
)
if self.date is not None:
unique_str += '_' + str(self.date)
if self.time is not None:
unique_str += '_' + str(self.time)
return unique_str.replace(':', '_')
def __eq__(self, other):
return (isinstance(other, self.__class__)
and np.allclose(self.geotransform, other.geotransform)
and (self.x_size == other.x_size)
and (self.y_size == other.y_size)
and (self.date == other.date)
and (self.wkt == other.wkt)
and (self.time == other.time)
)
def __ne__(self, other):
return not self.__eq__(other)
@cached_property
def _get_x_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# (0,0) is the top/left corner of the top/left pixel; the actual x/y coords of that pixel's center are (.5, .5).
x_centers = np.linspace(.5, self.x_size - .5, self.x_size)
y_centers = x_centers * 0
return (self.geotransform[0]
+ self.geotransform[1] * x_centers
+ self.geotransform[2] * y_centers)
@cached_property
def _get_y_axis(self):
"""See http://www.gdal.org/gdal_datamodel.html for details."""
# (0,0) is the top/left corner of the top/left pixel; the actual x/y coords of that pixel's center are (.5, .5).
y_centers = np.linspace(.5, self.y_size - .5, self.y_size)
x_centers = y_centers * 0
return (self.geotransform[3]
+ self.geotransform[4] * x_centers
+ self.geotransform[5] * y_centers)
def raster_to_projection_coords(self, pixel_x, pixel_y):
""" Use pixel centers when appropriate.
See documentation for the GDAL function GetGeoTransform for details. """
h_px_py = np.array([1, pixel_x, pixel_y])
gt = np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]])
arr = np.inner(gt, h_px_py)
return arr[2], arr[1]
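# Descriptive note on the affine map formed above (following the GDAL data
# model referenced in the docstring):
#   lon = gt[0] + px * gt[1] + py * gt[2]
#   lat = gt[3] + px * gt[4] + py * gt[5]
# projection_to_raster_coords below applies the inverse of this same map.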
def projection_to_raster_coords(self, lat, lon):
""" Returns pixel centers.
See documentation for the GDAL function GetGeoTransform for details. """
r_px_py = np.array([1, lon, lat])
tg = inv(np.array([[1, 0, 0], self.geotransform[0:3], self.geotransform[3:6]]))
return np.inner(tg, r_px_py)[1:]
def _as_gdal_dataset(self, driver="MEM", n_raster_count=1,
file_name="memory.tif", data_type=gdalconst.GDT_Float32
):
driver = gdal.GetDriverByName(driver)
dataset = driver.Create(file_name, int(self.x_size), int(self.y_size),
n_raster_count, data_type)
dataset.SetGeoTransform(self.geotransform) # dem_geotrans)
dataset.SetProjection(self.wkt_)
return dataset
def copy_and_transform(self, zoom=1.0):
# Probalby doesn't handle angled reference frames correctly.
copy = self.clone_traits(copy='deep')
x_center = self.geotransform[0] + self.geotransform[1] * self.x_size * .5
x_new_spacing = self.geotransform[1] / zoom
copy.geotransform[0:3] = x_center - x_new_spacing * self.x_size * .5, x_new_spacing, 0
y_center = self.geotransform[3] + self.geotransform[5] * self.y_size * .5
y_new_spacing = self.geotransform[5] / zoom
copy.geotransform[3:6] = y_center - y_new_spacing * self.y_size * .5, 0, y_new_spacing
return copy
class AbstractDataLayer(HasStrictTraits):
#===========================================================================
# Coordinate system maintained by parent simulation
#===========================================================================
grid_coordinates = Instance('GridCoordinates')
#===========================================================================
# Data Description
#===========================================================================
name = Str() # e.g., 'Temperature'
units = Str() # e.g., 'K'
data_type = Enum([None, 'LHTFL', 'SHTFL', 'GFLUX', 'SSRUN', 'BGRUN', 'TMP',
'ALBDO', 'WEASD', 'SNOD', 'SOLMST', 'TSOIL', 'SOLLIQ', 'EVAPTP',
'CANOPY', 'WDRUN', 'TMP', 'TMIN', 'TMAX', 'SPFH', 'PRES',
'DNSWF', 'DNLWF', 'LAND', 'VEGTYP', 'SOLTYP', 'TERR', 'VGREEN',
'RELSOL', 'MINRH', 'APCP', 'EVAPTS', 'NSWRS', 'NLWRS', 'SNOHF',
'SNOEV', 'DSWRF', 'DLWRF', 'ASNOW', 'ARAIN', 'EVP', 'SNOM',
'AVSFT', 'CNWAT', 'MSTAV', 'EVCW', 'TRANS', 'EVBS',
'SBSNO', 'PEVPR', 'ACOND', 'SNOWC', 'CCOND', 'RCS', 'RCT',
'RCQ', 'RCSOL', 'RSMIN', 'LAI', 'VEG', 'var250', 'var255'
])
depth = Enum([None, 'sfc', '0-200 cm down', '0-10 cm down', '0-100 cm down',
'0-200 cm down', '10-40 cm down',
'40-100 cm down', '100-200 cm down', '2 m above gnd',
'10 m above gnd'])
def __repr__(self):
return "<%s: %s; Data type: %s; Depth: %s >" % (self.__class__, self.name, self.data_type, self.depth)
#===========================================================================
# Enumeration
#===========================================================================
is_enumerated = Bool(False)
enumeration_legend = Dict(key_trait=Int, value_trait=Str) # e.g., {0:'Sea', 1:'Land'}
enumeration_colors = Dict(key_trait=Int, value_trait=Tuple((1., 1., 1.))) # e.g., {0:(128,128,128), 1:(1,50,150)}
############################################################################
# Plotting Info (if not enumerated)
############################################################################
scalar_c_lims = List()
scalar_cm = Instance(matplotlib.colors.Colormap)
def _scalar_cm_default(self):
return matplotlib.cm.get_cmap('gray')
def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour):
""" Reprojects data in this layer to match that in the GridCoordinates
object. """
source_dataset = self.grid_coordinates._as_gdal_dataset()
dest_dataset = grid_coordinates._as_gdal_dataset()
rb = source_dataset.GetRasterBand(1)
rb.SetNoDataValue(NO_DATA_VALUE)
rb.WriteArray(np.ma.filled(self.raster_data, NO_DATA_VALUE))
gdal.ReprojectImage(source_dataset, dest_dataset,
source_dataset.GetProjection(),
dest_dataset.GetProjection(),
interp)
dest_layer = self.clone_traits()
dest_layer.grid_coordinates = grid_coordinates
rb = dest_dataset.GetRasterBand(1)
dest_layer.raster_data = np.ma.masked_values(rb.ReadAsArray(), NO_DATA_VALUE)
return dest_layer
def export_to_geotiff(self, file_name):
dest_dataset = self.grid_coordinates._as_gdal_dataset(driver='GTiff',
file_name=file_name)
rb = dest_dataset.GetRasterBand(1)
rb.WriteArray(self.raster_data.filled())
rb.SetNoDataValue(float(self.raster_data.fill_value))
rb.SetDescription(self.name)
rb.SetUnitType(self.units)
def inpaint(self):
""" Replace masked-out elements in an array using an iterative image inpainting algorithm. """
import inpaint
filled = inpaint.replace_nans(np.ma.filled(self.raster_data, np.NAN).astype(np.float32), 3, 0.01, 2)
self.raster_data = np.ma.masked_invalid(filled)
def colormap(self):
from matplotlib import colors
import collections
if self.is_enumerated:
if self.enumeration_colors:
d = collections.OrderedDict(sorted(self.enumeration_colors.items()))
cmap = colors.ListedColormap(d.values()) # + [(0., 0., 0.)])
bounds = np.array(d.keys() + [d.keys()[-1] + 1]) - .5
norm = colors.BoundaryNorm(bounds, cmap.N)
return cmap, norm
else:
return None, None
# not enumerated.
return self.scalar_cm
def to_rgba(self):
data = self.raster_data
if self.is_enumerated:
if self.enumeration_colors:
cmap, norm = self.colormap()
data2 = norm(data) # np.clip((data - MIN) / (MAX - MIN), 0, 1)
rgba = (cmap(data2) * 255).astype(int)
rgba[:, :, 3] = np.logical_not(data.mask).astype(int) * 255
return rgba
else:
raise NotImplementedError()
# Not enumerated...
if self.scalar_c_lims:
MIN, MAX = self.scalar_c_lims
else:
MIN, MAX = data.min(), data.max()
cm = self.colormap()
data2 = np.clip((data - MIN) / (MAX - MIN), 0, 1)
rgba = (cm(data2) * 255).astype(int)
rgba[:, :, 3] = np.logical_not(data.mask).astype(int) * 255
return rgba
#===============================================================================
# DifferentiateInput/Staging/Results Layers
#===============================================================================
class InputLayerMixin(HasStrictTraits):
duration_s = Float()
class StagingLayerMixin(HasStrictTraits):
pass
class ResultsLayerMixin(HasStrictTraits):
pass
#===============================================================================
# Raster Data
#===============================================================================
class AbstractRasterDataLayer(AbstractDataLayer):
raster_data = Array()
def interp_value(self, lat, lon, indexed=False):
""" Lookup a pixel value in the raster data, performing linear interpolation
if necessary. Indexed ==> nearest neighbor (*fast*). """
(px, py) = self.grid_coordinates.projection_to_raster_coords(lat, lon)
if indexed:
return self.raster_data[round(py), round(px)]
else:
# from scipy.interpolate import interp2d
# f_interp = interp2d(self.grid_coordinates.x_axis, self.grid_coordinates.y_axis, self.raster_data, bounds_error=True)
# return f_interp(lon, lat)[0]
from scipy.ndimage import map_coordinates
ret = map_coordinates(self.raster_data, [[py], [px]], order=1) # linear interp
return ret[0]
class InputRasterDataLayer(InputLayerMixin, AbstractRasterDataLayer):
pass
class StagingRasterDataLayer(StagingLayerMixin, AbstractRasterDataLayer):
pass
class ResultsRasterDataLayer(ResultsLayerMixin, AbstractRasterDataLayer):
pass
#===============================================================================
# Point Data
#===============================================================================
class PointMeasurement(HasStrictTraits):
pass # Uncertain how to define coord system and how this measurement looks...
class AbstractPointDataLayer(AbstractDataLayer):
point_measurements = List(Instance(PointMeasurement))
class InputPointDataLayer(InputLayerMixin, AbstractPointDataLayer):
pass
class StagingPointDataLayer(StagingLayerMixin, AbstractPointDataLayer):
pass
class ResultsPointDataLayer(ResultsLayerMixin, AbstractPointDataLayer):
pass
| apache-2.0 |
amitsela/incubator-beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 9 | 4504 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 +.42193i
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in xrange(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(
get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) / max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument('--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
p = beam.Pipeline(argv=pipeline_args)
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
# Group each coordinate triplet by its x value, then write the coordinates to
# the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(coordinates
| 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda (k, coords): ' '.join('(%s, %s, %s)' % coord for coord in coords))
| WriteToText(known_args.coordinate_output))
# pylint: enable=expression-not-assigned
return p.run().wait_until_finish()
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
702nADOS/sumo | tools/visualization/plot_tripinfo_distributions.py | 1 | 4081 | #!/usr/bin/env python
"""
@file plot_tripinfo_distributions.py
@author Daniel Krajzewicz
@author Laura Bieker
@date 2013-11-11
@version $Id: plot_tripinfo_distributions.py 22608 2017-01-17 06:28:54Z behrisch $
This script plots measures from the tripinfo output, classified into bins
matplotlib (http://matplotlib.org/) has to be installed for this purpose
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2013-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import sys
import random
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import sumolib
from sumolib.visualization import helpers
import matplotlib.pyplot as plt
def main(args=None):
"""The main function; parses options and plots"""
# ---------- build and read options ----------
from optparse import OptionParser
optParser = OptionParser()
optParser.add_option("-i", "--tripinfos-inputs", dest="tripinfos", metavar="FILE",
help="Defines the tripinfo-output files to use as input")
optParser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="If set, the script says what it's doing")
optParser.add_option("-m", "--measure", dest="measure",
default="duration", help="Define which measure to plot")
optParser.add_option("--bins", dest="bins",
type="int", default=20, help="Define the bin number")
optParser.add_option("--norm", dest="norm",
type="float", default=1., help="Read values will be devided by this number")
optParser.add_option("--minV", dest="minV",
type="float", default=None, help="Define the minimum value boundary")
optParser.add_option("--maxV", dest="maxV",
type="float", default=None, help="Define the maximum value boundary")
# standard plot options
helpers.addInteractionOptions(optParser)
helpers.addPlotOptions(optParser)
# parse
options, remaining_args = optParser.parse_args(args=args)
if options.tripinfos == None:
print("Error: at least one tripinfo file must be given")
sys.exit(1)
minV = options.minV
maxV = options.maxV
files = options.tripinfos.split(",")
values = {}
for f in files:
if options.verbose:
print("Reading '%s'..." % f)
nums = sumolib.output.parse_sax__asList(
f, "tripinfo", [options.measure])
fvp = sumolib.output.toList(nums, options.measure)
fv = [x / options.norm for x in fvp]
sumolib.output.prune(fv, options.minV, options.maxV)
values[f] = fv
if minV == None:
minV = fv[0]
maxV = fv[0]
minV = min(minV, min(fv))
maxV = max(maxV, max(fv))
hists = {}
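    # Values are binned into options.bins equal-width bins spanning [minV, maxV];
    # the min(..., options.bins - 1) below clamps values equal to maxV into the
    # last bin instead of overflowing the histogram.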
binWidth = (maxV - minV) / float(options.bins)
for f in files:
h = [0] * options.bins
for v in values[f]:
i = min(int((v - minV) / binWidth), options.bins - 1)
h[i] = h[i] + 1
hists[f] = h
width = binWidth / float(len(files)) * .8
offset = binWidth * .1
center = []
for j in range(0, options.bins):
center.append(binWidth * j + offset)
fig, ax = helpers.openFigure(options)
for i, f in enumerate(files):
c = helpers.getColor(options, i, len(files))
l = helpers.getLabel(f, i, options)
plt.bar(center, hists[f], width=width, label=l, color=c)
for j in range(0, options.bins):
center[j] = center[j] + width
helpers.closeFigure(fig, ax, options)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/event_handling/poly_editor.py | 3 | 5289 | """
This is an example to show how to build cross-GUI applications using
matplotlib event handling to interact with objects on the canvas
"""
from matplotlib.artist import Artist
from matplotlib.patches import Polygon, CirclePolygon
from numpy import sqrt, nonzero, equal, array, asarray, dot, amin, cos, sin
from matplotlib.mlab import dist_point_to_segment
from matplotlib.lines import Line2D
class PolygonInteractor:
"""
    A polygon editor.
Key-bindings
't' toggle vertex markers on and off. When vertex markers are on,
you can move them, delete them
'd' delete the vertex under point
'i' insert a vertex at point. You must be within epsilon of the
line connecting two existing vertices
"""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
def __init__(self, ax, poly):
if poly.figure is None:
raise RuntimeError('You must first add the polygon to a figure or canvas before defining the interactor')
self.ax = ax
canvas = poly.figure.canvas
self.poly = poly
x, y = zip(*self.poly.xy)
self.line = Line2D(x,y,marker='o', markerfacecolor='r', animated=True)
self.ax.add_line(self.line)
#self._update_line(poly)
cid = self.poly.add_callback(self.poly_changed)
self._ind = None # the active vert
canvas.mpl_connect('draw_event', self.draw_callback)
canvas.mpl_connect('button_press_event', self.button_press_callback)
canvas.mpl_connect('key_press_event', self.key_press_callback)
canvas.mpl_connect('button_release_event', self.button_release_callback)
canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
self.canvas = canvas
def draw_callback(self, event):
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
def poly_changed(self, poly):
        'this method is called whenever the polygon object is changed'
# only copy the artist props to the line (except visibility)
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis) # don't use the poly visibility state
def get_ind_under_point(self, event):
'get the index of the vertex under point if within epsilon tolerance'
# display coords
xy = asarray(self.poly.xy)
xyt = self.poly.get_transform().transform(xy)
xt, yt = xyt[:, 0], xyt[:, 1]
d = sqrt((xt-event.x)**2 + (yt-event.y)**2)
indseq = nonzero(equal(d, amin(d)))[0]
ind = indseq[0]
if d[ind]>=self.epsilon:
ind = None
return ind
def button_press_callback(self, event):
'whenever a mouse button is pressed'
if not self.showverts: return
if event.inaxes==None: return
if event.button != 1: return
self._ind = self.get_ind_under_point(event)
def button_release_callback(self, event):
'whenever a mouse button is released'
if not self.showverts: return
if event.button != 1: return
self._ind = None
def key_press_callback(self, event):
'whenever a key is pressed'
if not event.inaxes: return
if event.key=='t':
self.showverts = not self.showverts
self.line.set_visible(self.showverts)
if not self.showverts: self._ind = None
elif event.key=='d':
ind = self.get_ind_under_point(event)
if ind is not None:
self.poly.xy = [tup for i,tup in enumerate(self.poly.xy) if i!=ind]
self.line.set_data(zip(*self.poly.xy))
elif event.key=='i':
xys = self.poly.get_transform().transform(self.poly.xy)
p = event.x, event.y # display coords
for i in range(len(xys)-1):
s0 = xys[i]
s1 = xys[i+1]
d = dist_point_to_segment(p, s0, s1)
if d<=self.epsilon:
self.poly.xy = array(
list(self.poly.xy[:i]) +
[(event.xdata, event.ydata)] +
list(self.poly.xy[i:]))
self.line.set_data(zip(*self.poly.xy))
break
self.canvas.draw()
def motion_notify_callback(self, event):
'on mouse movement'
if not self.showverts: return
if self._ind is None: return
if event.inaxes is None: return
if event.button != 1: return
x,y = event.xdata, event.ydata
self.poly.xy[self._ind] = x,y
self.line.set_data(zip(*self.poly.xy))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
from pylab import *
fig = figure()
theta = arange(0, 2*pi, 0.1)
r = 1.5
xs = r*cos(theta)
ys = r*sin(theta)
poly = Polygon(zip(xs, ys,), animated=True)
ax = subplot(111)
ax.add_patch(poly)
p = PolygonInteractor( ax, poly)
#ax.add_line(p.line)
ax.set_title('Click and drag a point to move it')
ax.set_xlim((-2,2))
ax.set_ylim((-2,2))
show()
| gpl-2.0 |
ctools/ctools | examples/show_pull_histogram.py | 1 | 4671 | #! /usr/bin/env python
# ==========================================================================
# Shows the pull histogram
#
# Copyright (C) 2011-2021 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import sys
try:
import matplotlib.pyplot as plt
plt.figure()
plt.close()
except (ImportError, RuntimeError):
print('This script needs the "matplotlib" module')
sys.exit()
try:
import numpy as np
except ImportError:
print('This script needs the "numpy" module')
sys.exit()
import gammalib
import cscripts
# ====================== #
# Read pull distribution #
# ====================== #
def read_pull_distribution(filename, parname):
"""
Read pull distribution
Parameters
----------
filename : str
Name of FITS or CSV file
parname : str
Parameter name
Returns
-------
pulls : list of floats
List with pull distribution
"""
# Read pull distribution dependent on file type
fname = gammalib.GFilename(filename)
if fname.is_fits():
pulls = read_pull_distribution_fits(filename, parname)
else:
pulls = cscripts.ioutils.read_pull_values(filename, parname)
# Return pulls
return pulls
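# Hedged usage sketch (not part of the original script): the reader above can be
# called directly; the file name 'pulls.fits' and the parameter name 'Prefactor'
# are assumptions chosen only for illustration.
#
#   pulls = read_pull_distribution('pulls.fits', 'Prefactor')
#   hist, edges = np.histogram(np.array(pulls), bins=50, range=(-4.0, 4.0))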
# ===================================== #
# Read pull distribution from FITS file #
# ===================================== #
def read_pull_distribution_fits(filename, parname):
"""
Read pull distribution from FITS file
Parameters
----------
filename : str
Name of FITS file
parname : str
Parameter name
Returns
-------
pulls : list of floats
List with pull distribution
"""
# Open FITS file
fits = gammalib.GFits(filename)
# Get pull distribution table
table = fits.table('PULL_DISTRIBUTION')
# Get relevant column
column = table[parname]
# Initialise vector
pulls = []
# Fill vectors
nrows = table.nrows()
for row in range(nrows):
pulls.append(column[row])
# Return
return pulls
# =================== #
# Plot pull histogram #
# =================== #
def plot_pull_histogram(filename, parname, nbins, plotfile):
"""
Plot pull histogram
Parameters
----------
filename : str
Pull filename
parname : str
Parameter name
nbins : int
        Number of histogram bins
plotfile : str
Plot filename
"""
# Read pull distribution from file
pulls = read_pull_distribution(filename, parname)
# Create Numpy array
values = np.array(pulls)
# Create histogram
_, bins, _ = plt.hist(values, nbins, range=[-4.0,4.0],
normed=True, facecolor='green')
# Create expected distribution
y = np.exp(-0.5*bins*bins) / np.sqrt(2.0*np.pi)
plt.plot(bins, y, 'r-', linewidth=2)
# Set plot
plt.xlabel('Pull ('+parname+')')
plt.ylabel('Arbitrary units')
plt.title(parname)
plt.grid(True)
# Show figure
if len(plotfile) > 0:
plt.savefig(plotfile)
else:
plt.show()
# Return
return
# =================== #
# Show pull histogram #
# =================== #
def show_pull_histogram():
"""
Show pull histogram
"""
# Set usage string
usage = 'show_pull_histogram.py [-n bins] [-p plotfile] file parameter'
# Set default options
options = [{'option': '-n', 'value': '50'},
{'option': '-p', 'value': ''}]
# Get arguments and options from command line arguments
args, options = cscripts.ioutils.get_args_options(options, usage)
# Extract script parameters from options
nbins = int(options[0]['value'])
plotfile = options[1]['value']
# Plot pull histogram
plot_pull_histogram(args[0], args[1], nbins, plotfile)
# Return
return
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
# Show pull histogram
show_pull_histogram()
| gpl-3.0 |
agiovann/Constrained_NMF | caiman/source_extraction/volpy/utils.py | 1 | 9542 | #!/usr/bin/env python
"""
Created on Mon Mar 23 16:45:00 2020
This file creates functions used by demo_pipeline_voltage_imaging.py
@author: caichangjia
"""
#%%
from IPython import get_ipython
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import numpy as np
import os
import tensorflow as tf
import caiman as cm
from caiman.external.cell_magic_wand import cell_magic_wand_single_point
from caiman.paths import caiman_datadir
def quick_annotation(img, min_radius, max_radius, roughness=2):
""" Quick annotation method in VolPy using cell magic wand plugin
Args:
img: 2-D array
img as the background for selection
min_radius: float
minimum radius of the selection
max_radius: float
maximum raidus of the selection
roughness: int
roughness of the selection surface
Return:
ROIs: 3-D array
region of interests
(# of components * # of pixels in x dim * # of pixels in y dim)
"""
try:
if __IPYTHON__:
get_ipython().run_line_magic('matplotlib', 'auto')
except NameError:
pass
def tellme(s):
print(s)
plt.title(s, fontsize=16)
plt.draw()
keep_select=True
ROIs = []
while keep_select:
# Plot img
plt.clf()
plt.imshow(img, cmap='gray', vmax=np.percentile(img, 98))
if len(ROIs) == 0:
pass
elif len(ROIs) == 1:
plt.imshow(ROIs[0], alpha=0.3, cmap='Oranges')
else:
plt.imshow(np.array(ROIs).sum(axis=0), alpha=0.3, cmap='Oranges')
# Plot point and ROI
tellme('Click center of neuron')
center = plt.ginput(1)[0]
plt.plot(center[0], center[1], 'r+')
ROI = cell_magic_wand_single_point(img, (center[1], center[0]),
min_radius=min_radius, max_radius=max_radius,
roughness=roughness, zoom_factor=1)[0]
plt.imshow(ROI, alpha=0.3, cmap='Reds')
# Select or not
tellme('Select? Key click for yes, mouse click for no')
select = plt.waitforbuttonpress()
if select:
ROIs.append(ROI)
tellme('You have selected a neuron. \n Keep selecting? Key click for yes, mouse click for no')
else:
tellme('You did not select a neuron \n Keep selecting? Key click for yes, mouse click for no')
keep_select = plt.waitforbuttonpress()
plt.close()
ROIs = np.array(ROIs)
try:
if __IPYTHON__:
get_ipython().run_line_magic('matplotlib', 'inline')
except NameError:
pass
return ROIs
def mrcnn_inference(img, weights_path, display_result=True):
""" Mask R-CNN inference in VolPy
Args:
img: 2-D array
summary images for detection
weights_path: str
path for Mask R-CNN weight
display_result: boolean
if True, the function will plot the result of inference
Return:
ROIs: 3-D array
region of interests
(# of components * # of pixels in x dim * # of pixels in y dim)
"""
from caiman.source_extraction.volpy.mrcnn import visualize, neurons
import caiman.source_extraction.volpy.mrcnn.model as modellib
config = neurons.NeuronsConfig()
class InferenceConfig(config.__class__):
# Run detection on one img at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
IMAGE_RESIZE_MODE = "pad64"
IMAGE_MAX_DIM = 512
RPN_NMS_THRESHOLD = 0.7
POST_NMS_ROIS_INFERENCE = 1000
config = InferenceConfig()
config.display()
model_dir = os.path.join(caiman_datadir(), 'model')
DEVICE = "/cpu:0" # /cpu:0 or /gpu:0
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=model_dir,
config=config)
model.load_weights(weights_path, by_name=True)
results = model.detect([img], verbose=1)
r = results[0]
ROIs = r['masks'].transpose([2, 0, 1])
if display_result:
_, ax = plt.subplots(1,1, figsize=(16,16))
visualize.display_instances(img, r['rois'], r['masks'], r['class_ids'],
['BG', 'neurons'], r['scores'], ax=ax,
title="Predictions")
return ROIs
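# Hedged usage sketch (not part of the original module): a typical call with a
# 2-D summary image `summary_img` and a locally stored weight file; both names
# are assumptions for illustration only.
#
#   ROIs = mrcnn_inference(summary_img,
#                          weights_path='mask_rcnn_neurons.h5',
#                          display_result=False)
#   # ROIs is an (n_detected_neurons, height, width) stack of boolean masks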
def reconstructed_movie(estimates, fnames, idx, scope, flip_signal):
""" Create reconstructed movie in VolPy. The movie has three panels:
motion corrected movie on the left panel, movie removed from the baseline
on the mid panel and reconstructed movie on the right panel.
Args:
estimates: dict
estimates dictionary contain results of VolPy
fnames: list
motion corrected movie in F-order memory mapping format
idx: list
index of selected neurons
scope: list
scope of number of frames in reconstructed movie
flip_signal: boolean
if True the signal will be flipped (for voltron)
Return:
mv_all: 3-D array
motion corrected movie, movie removed from baseline, reconstructed movie
concatenated into one matrix
"""
# motion corrected movie and movie removed from baseline
mv = cm.load(fnames, fr=400)[scope[0]:scope[1]]
dims = (mv.shape[1], mv.shape[2])
mv_bl = mv.computeDFF(secsWindow=0.1)[0]
mv = (mv-mv.min())/(mv.max()-mv.min())
if flip_signal:
mv_bl = -mv_bl
mv_bl[mv_bl<np.percentile(mv_bl,3)] = np.percentile(mv_bl,3)
mv_bl[mv_bl>np.percentile(mv_bl,98)] = np.percentile(mv_bl,98)
mv_bl = (mv_bl - mv_bl.min())/(mv_bl.max()-mv_bl.min())
# reconstructed movie
estimates['weights'][estimates['weights']<0] = 0
A = estimates['weights'][idx].transpose([1,2,0]).reshape((-1,len(idx)))
C = estimates['t_rec'][idx,scope[0]:scope[1]]
mv_rec = np.dot(A, C).reshape((dims[0],dims[1],scope[1]-scope[0])).transpose((2,0,1))
mv_rec = cm.movie(mv_rec,fr=400)
mv_rec = (mv_rec - mv_rec.min())/(mv_rec.max()-mv_rec.min())
mv_all = cm.concatenate((mv,mv_bl,mv_rec),axis=2)
return mv_all
def view_components(estimates, img, idx):
""" View spatial and temporal components interactively
Args:
estimates: dict
estimates dictionary contain results of VolPy
img: 2-D array
summary images for detection
idx: list
index of selected neurons
"""
n = len(idx)
fig = plt.figure(figsize=(10, 10))
axcomp = plt.axes([0.05, 0.05, 0.9, 0.03])
ax1 = plt.axes([0.05, 0.55, 0.4, 0.4])
ax3 = plt.axes([0.55, 0.55, 0.4, 0.4])
ax2 = plt.axes([0.05, 0.1, 0.9, 0.4])
s_comp = Slider(axcomp, 'Component', 0, n, valinit=0)
vmax = np.percentile(img, 98)
def arrow_key_image_control(event):
if event.key == 'left':
new_val = np.round(s_comp.val - 1)
if new_val < 0:
new_val = 0
s_comp.set_val(new_val)
elif event.key == 'right':
new_val = np.round(s_comp.val + 1)
if new_val > n :
new_val = n
s_comp.set_val(new_val)
def update(val):
i = np.int(np.round(s_comp.val))
print(f'Component:{i}')
if i < n:
ax1.cla()
imgtmp = estimates['weights'][idx][i]
ax1.imshow(imgtmp, interpolation='None', cmap=plt.cm.gray, vmax=np.max(imgtmp)*0.5, vmin=0)
ax1.set_title(f'Spatial component {i+1}')
ax1.axis('off')
ax2.cla()
ax2.plot(estimates['t'][idx][i], alpha=0.8)
ax2.plot(estimates['t_sub'][idx][i])
ax2.plot(estimates['t_rec'][idx][i], alpha = 0.4, color='red')
ax2.plot(estimates['spikes'][idx][i],
1.05 * np.max(estimates['t'][idx][i]) * np.ones(estimates['spikes'][idx][i].shape),
color='r', marker='.', fillstyle='none', linestyle='none')
ax2.set_title(f'Signal and spike times {i+1}')
ax2.legend(labels=['t', 't_sub', 't_rec', 'spikes'])
ax2.text(0.1, 0.1, f'snr:{round(estimates["snr"][idx][i],2)}', horizontalalignment='center', verticalalignment='center', transform = ax2.transAxes)
ax2.text(0.1, 0.07, f'num_spikes: {len(estimates["spikes"][idx][i])}', horizontalalignment='center', verticalalignment='center', transform = ax2.transAxes)
ax2.text(0.1, 0.04, f'locality_test: {estimates["locality"][idx][i]}', horizontalalignment='center', verticalalignment='center', transform = ax2.transAxes)
ax3.cla()
ax3.imshow(img, interpolation='None', cmap=plt.cm.gray, vmax=vmax)
imgtmp2 = imgtmp.copy()
imgtmp2[imgtmp2 == 0] = np.nan
ax3.imshow(imgtmp2, interpolation='None',
alpha=0.5, cmap=plt.cm.hot)
ax3.axis('off')
s_comp.on_changed(update)
s_comp.set_val(0)
fig.canvas.mpl_connect('key_release_event', arrow_key_image_control)
plt.show() | gpl-2.0 |
marqh/iris | lib/iris/tests/unit/plot/test_pcolor.py | 10 | 3150 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.pcolor` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.pcolor(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.pcolor(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.pcolor(self.cube, coords=('str_coord', 'bar'), axes=ax)
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.pcolor(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.pcolor,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=True)
coord = self.cube.coord('foo')
self.foo = coord.contiguous_bounds()
self.foo_index = np.arange(coord.points.size + 1)
coord = self.cube.coord('bar')
self.bar = coord.contiguous_bounds()
self.bar_index = np.arange(coord.points.size + 1)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.pcolor')
self.draw_func = iplt.pcolor
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
bhargavasana/synthpop | synthpop/categorizer.py | 2 | 5677 | import pandas as pd
import itertools
# TODO DOCSTRINGS!!
def categorize(df, eval_d, index_cols=None):
"""
Given a DataFrame, a definition for categories and a list of index columns,
    returns a new DataFrame with the new categories.
Parameters
----------
df: pandas.DataFrame
DataFrame to categorize
eval_d : dictionary
Category definitions. This is a mapping from tuples (category name, category value) to
an expression that can be evaluated on the `df`.
(e.g. {('workers', 'two or more'): 'B08202_004E + B08202_005E', ... })
index_cols : array
A list of column names of `df` that should be used as an index.
Returns
-------
cat_df : pandas.DataFrame
The `df` DataFrame but with a pandas.MultiIndex as the index defined by `index_cols`,
and a pandas.MultiIndex defining the columns as defined by `eval_d`
"""
cat_df = pd.DataFrame(index=df.index)
for index, expr in eval_d.iteritems():
cat_df[index] = df.eval(expr)
if index_cols is not None:
cat_df[index_cols] = df[index_cols]
cat_df = cat_df.set_index(index_cols)
cat_df.columns = pd.MultiIndex.from_tuples(cat_df.columns,
names=['cat_name', 'cat_value'])
cat_df = cat_df.sort_index(axis=1)
return cat_df
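# Hedged usage sketch (not part of the original module): the ACS-style column
# names and the state/county index columns below are assumptions chosen only to
# illustrate the eval_d mapping.
#
#   acs = pd.DataFrame({
#       'state': ['06', '06'], 'county': ['001', '003'],
#       'B08202_004E': [10, 20], 'B08202_005E': [1, 2]})
#   cats = categorize(
#       acs,
#       {('workers', 'two or more'): 'B08202_004E + B08202_005E'},
#       index_cols=['state', 'county'])
#   # cats[('workers', 'two or more')] equals [11, 22], indexed by (state, county)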
def sum_accross_category(df, subtract_mean=True):
"""
This is a convenience function to sum the categorical values for each
category - the mean across each category is then subtracted so all the
    cells in the table should be close to zero. It is not exactly zero
    because of rounding errors in the scaling of any tract variables down to
    block group variables.
"""
df = df.stack(level=1).fillna(0).groupby(level=0).sum()
if subtract_mean:
df = df.sub(df.mean(axis=1), axis="rows")
return df
def category_combinations(index):
"""
    This method converts a hierarchical MultiIndex of category names and
    category values into the cross-product of all possible
    category combinations.
"""
d = {}
for cat_name, cat_value in index:
d.setdefault(cat_name, [])
d[cat_name].append(cat_value)
for cat_name in d.keys():
if len(d[cat_name]) == 1:
del d[cat_name]
df = pd.DataFrame(list(itertools.product(*d.values())))
df.columns = cols = d.keys()
df.index.name = "cat_id"
df = df.reset_index().set_index(cols)
return df
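# Hedged example of the cross-product behaviour (not part of the original
# module); the category names are invented for illustration.
#
#   idx = pd.MultiIndex.from_tuples(
#       [('workers', 'none'), ('workers', 'one or more'),
#        ('cars', '0'), ('cars', '1+')])
#   combos = category_combinations(idx)
#   # combos enumerates all 4 (workers, cars) value pairs, each tagged with a
#   # cat_id that joint_distribution() later merges onto the sample records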
def joint_distribution(sample_df, category_df, mapping_functions, map_all=True):
# set counts to zero
category_df["frequency"] = 0
category_names = category_df.index.names
# by default apply all mapping functions irrespective of whether or not the categories are being controlled for
if map_all:
for name in category_names:
assert name in mapping_functions, "Every category needs to have a " \
"mapping function with the same " \
"name to define that category for " \
"the pums sample records"
for name in mapping_functions.keys():
sample_df[name] = sample_df.apply(mapping_functions[name], axis=1)
else:
for name in category_names:
assert name in mapping_functions, "Every category needs to have a " \
"mapping function with the same " \
"name to define that category for " \
"the pums sample records"
sample_df[name] = sample_df.apply(mapping_functions[name], axis=1)
category_df["frequency"] = sample_df.groupby(category_names).size()
category_df["frequency"] = category_df["frequency"].fillna(0)
# do the merge to add the category id
sample_df = pd.merge(sample_df, category_df[["cat_id"]],
left_on=category_names, right_index=True)
return sample_df, category_df
def _frequency_table(sample_df, category_ids):
"""
    Take the result that comes out of the method above and turn it into the
    frequency table format used by the IPU.
"""
df = sample_df.groupby(['hh_id', 'cat_id']).size().\
unstack().fillna(0)
# need to manually add in case we missed a whole cat_id in the sample
for cat_id in category_ids:
if cat_id not in df.columns:
df[cat_id] = 0
assert len(df.columns) == len(category_ids)
assert df.sum().sum() == len(sample_df)
return df
def frequency_tables(persons_sample_df, households_sample_df,
person_cat_ids, household_cat_ids):
households_sample_df.index.name = "hh_id"
households_sample_df = households_sample_df.reset_index().\
set_index("serialno")
h_freq_table = _frequency_table(households_sample_df,
household_cat_ids)
persons_sample_df = pd.merge(persons_sample_df,
households_sample_df[["hh_id"]],
left_on=["serialno"], right_index=True,
how="left")
p_freq_table = _frequency_table(persons_sample_df,
person_cat_ids)
p_freq_table = p_freq_table.reindex(h_freq_table.index).fillna(0)
assert len(h_freq_table) == len(p_freq_table)
h_freq_table = h_freq_table.sort_index(axis=1)
p_freq_table = p_freq_table.sort_index(axis=1)
return h_freq_table, p_freq_table
| bsd-3-clause |
bikong2/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curves.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
lucidfrontier45/scikit-learn | benchmarks/bench_glm.py | 6 | 1430 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print 'Iteration %s of %s' % (i, n_iter)
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
    pl.xlabel('Dimensions')
pl.ylabel('Time (in seconds)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'])
pl.axis('tight')
pl.show()
| bsd-3-clause |
mikeengland/fireant | fireant/tests/queries/test_dimension_choices.py | 2 | 13142 | from unittest import TestCase
from unittest.mock import (
ANY,
MagicMock,
Mock,
patch,
)
import pandas as pd
from fireant import DataSet, DataType, Field
from fireant.tests.dataset.matchers import (
FieldMatcher,
PypikaQueryMatcher,
)
from fireant.tests.dataset.mocks import (
mock_dataset,
mock_hint_dataset,
politicians_table,
test_database,
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class DimensionsChoicesQueryBuilderTests(TestCase):
maxDiff = None
def test_query_choices_for_field(self):
query = mock_dataset.fields.political_party.choices.sql[0]
self.assertEqual(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."politician" '
'GROUP BY "$political_party"',
str(query),
)
def test_query_choices_for_field_with_join(self):
query = mock_dataset.fields["district-name"].choices.sql[0]
self.assertEqual(
"SELECT "
'"district"."district_name" "$district-name" '
'FROM "politics"."politician" '
'FULL OUTER JOIN "locations"."district" '
'ON "politician"."district_id"="district"."id" '
'GROUP BY "$district-name"',
str(query),
)
def test_filter_choices(self):
query = (
mock_dataset.fields["candidate-name"]
.choices.filter(mock_dataset.fields.political_party.isin(["d", "r"]))
.sql[0]
)
self.assertEqual(
"SELECT "
'"candidate_name" "$candidate-name" '
'FROM "politics"."politician" '
"WHERE \"political_party\" IN ('d','r') "
'GROUP BY "$candidate-name"',
str(query),
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
class DimensionsChoicesQueryBuilderWithHintTableTests(TestCase):
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
def test_query_choices_for_dataset_with_hint_table(self, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["candidate_name", "varchar(128)"],
["candidate_name_display", "varchar(128)"],
],
)
def test_query_choices_for_field_with_display_hint_table(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields.candidate_name.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"candidate_name" "$candidate_name",'
'"candidate_name_display" '
'"$candidate_name_display" '
'FROM "politics"."hints" '
'WHERE NOT "candidate_name" IS NULL '
'GROUP BY "$candidate_name",'
'"$candidate_name_display" '
'ORDER BY "$candidate_name"'
)
],
FieldMatcher(mock_hint_dataset.fields.candidate_name),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["political_party", "varchar(128)"],
["state_id", "varchar(128)"],
],
)
def test_query_choices_for_filters_from_joins(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields["district-name"].isin(["Manhattan"])
).filter(mock_hint_dataset.fields["state"].isin(["Texas"])).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"hints"."political_party" "$political_party" '
'FROM "politics"."hints" '
'JOIN "locations"."state" ON '
'"hints"."state_id"="state"."id" '
'WHERE "state"."state_name" IN (\'Texas\') '
'AND NOT "hints"."political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["political_party", "varchar(128)"],
["candidate_name", "varchar(128)"],
],
)
def test_query_choices_for_filters_from_base(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields.candidate_name.isin(["Bill Clinton"])
).filter(mock_hint_dataset.fields["election-year"].isin([1992])).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
"WHERE \"candidate_name\" IN ('Bill Clinton') "
'AND NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[["political_party", "varchar(128)"]],
)
def test_query_choices_for_case_filter(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields.political_party.choices.filter(
mock_hint_dataset.fields.political_party_case.isin(["Democrat", "Bill Clinton"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."hints" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_hint_dataset.fields.political_party),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[["district_name", "varchar(128)"]],
)
def test_query_choices_for_join_dimension(self, mock_get_column_definitions: Mock, mock_fetch_data: Mock):
mock_hint_dataset.fields["district-name"].choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"district_name" "$district-name" '
'FROM "politics"."hints" '
'WHERE NOT "district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["district_name", "varchar(128)"],
["candidate_name", "varchar(128)"],
],
)
def test_query_choices_for_join_dimension_with_filter_from_base(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields["district-name"].choices.filter(
mock_hint_dataset.fields.candidate_name.isin(["Bill Clinton"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"district_name" "$district-name" '
'FROM "politics"."hints" '
"WHERE \"candidate_name\" IN ('Bill Clinton') "
'AND NOT "district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
@patch.object(
mock_hint_dataset.database,
"get_column_definitions",
return_value=[
["district_name", "varchar(128)"],
["district_id", "varchar(128)"],
],
)
def test_query_choices_for_join_dimension_with_filter_from_join(
self, mock_get_column_definitions: Mock, mock_fetch_data: Mock
):
mock_hint_dataset.fields["district-name"].choices.filter(
mock_hint_dataset.fields["district-name"].isin(["Manhattan"])
).fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"hints"."district_name" "$district-name" '
'FROM "politics"."hints" '
'FULL OUTER JOIN "locations"."district" ON '
'"hints"."district_id"="district"."id" '
'WHERE "district"."district_name" IN ('
"'Manhattan') "
'AND NOT "hints"."district_name" IS NULL '
'GROUP BY "$district-name" '
'ORDER BY "$district-name"'
)
],
FieldMatcher(mock_hint_dataset.fields["district-name"]),
)
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
@patch("fireant.queries.builder.dimension_choices_query_builder.fetch_data", return_value=(100, MagicMock()))
class DimensionsChoicesFetchTests(TestCase):
def test_query_choices_for_field(self, mock_fetch_data: Mock):
mock_dataset.fields.political_party.choices.fetch()
mock_fetch_data.assert_called_once_with(
ANY,
[
PypikaQueryMatcher(
"SELECT "
'"political_party" "$political_party" '
'FROM "politics"."politician" '
'WHERE NOT "political_party" IS NULL '
'GROUP BY "$political_party" '
'ORDER BY "$political_party"'
)
],
FieldMatcher(mock_dataset.fields.political_party),
)
def test_envelopes_responses_if_return_additional_metadata_True(self, mock_fetch_data):
mock_dataset = DataSet(
table=politicians_table,
database=test_database,
return_additional_metadata=True,
fields=[
Field(
"political_party",
label="Party",
definition=politicians_table.political_party,
data_type=DataType.text,
hyperlink_template="http://example.com/{political_party}",
)
],
)
df = pd.DataFrame({'political_party': ['a', 'b', 'c']}).set_index('political_party')
mock_fetch_data.return_value = 100, df
result = mock_dataset.fields.political_party.choices.fetch()
self.assertEqual(dict(max_rows_returned=100), result['metadata'])
self.assertTrue(
pd.Series(['a', 'b', 'c'], index=['a', 'b', 'c'], name='political_party').equals(result['data'])
)
| apache-2.0 |
dsm054/pandas | pandas/tests/io/test_parquet.py | 1 | 20019 | """ test parquet compat """
import os
import pytest
import datetime
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import PY3, is_platform_windows, is_platform_mac
from pandas.io.parquet import (to_parquet, read_parquet, get_engine,
PyArrowImpl, FastParquetImpl)
from pandas.util import testing as tm
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
# setup engines & skips
@pytest.fixture(params=[
pytest.param('fastparquet',
marks=pytest.mark.skipif(not _HAVE_FASTPARQUET,
reason='fastparquet is '
'not installed')),
pytest.param('pyarrow',
marks=pytest.mark.skipif(not _HAVE_PYARROW,
reason='pyarrow is '
'not installed'))])
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
pytest.skip("pyarrow is < 0.7.0")
return 'pyarrow'
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return 'fastparquet'
@pytest.fixture
def fp_lt_014():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
pytest.skip("fastparquet is >= 0.1.4")
return 'fastparquet'
@pytest.fixture
def df_compat():
return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('20130101', periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
})
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{'string': list('abc'),
'string_with_nan': ['a', np.nan, 'c'],
'string_with_none': ['a', None, 'c'],
'bytes': [b'foo', b'bar', b'baz'],
'unicode': [u'foo', u'bar', u'baz'],
'int': list(range(1, 4)),
'uint': np.arange(3, 6).astype('u1'),
'float': np.arange(4.0, 7.0, dtype='float64'),
'float_with_nan': [2., np.nan, 3.],
'bool': [True, False, True],
'datetime': pd.date_range('20130101', periods=3),
'datetime_with_nat': [pd.Timestamp('20130101'),
pd.NaT,
pd.Timestamp('20130103')]})
def check_round_trip(df, engine=None, path=None,
write_kwargs=None, read_kwargs=None,
expected=None, check_names=True,
repeat=2):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
    check_names: bool, optional
        Whether to check the `names` attribute of the index and columns when
        comparing the round-tripped DataFrame with the expected one
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {'compression': None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs['engine'] = engine
read_kwargs['engine'] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(expected, actual,
check_names=check_names)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, 'foo', 'bar')
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context('io.parquet.engine', 'pyarrow'):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context('io.parquet.engine', 'fastparquet'):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context('io.parquet.engine', 'auto'):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'pyarrow'):
assert isinstance(get_engine('auto'), PyArrowImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'fastparquet'):
assert isinstance(get_engine('auto'), FastParquetImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
with pd.option_context('io.parquet.engine', 'auto'):
assert isinstance(get_engine('auto'), PyArrowImpl)
assert isinstance(get_engine('pyarrow'), PyArrowImpl)
assert isinstance(get_engine('fastparquet'), FastParquetImpl)
@pytest.mark.xfail(is_platform_windows() or is_platform_mac(),
reason="reading pa metadata failing on Windows/mac",
strict=True)
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=['a', 'd'])
tm.assert_frame_equal(result, df[['a', 'd']])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=['a', 'd'])
tm.assert_frame_equal(result, df[['a', 'd']])
class Base(object):
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
np.array([1, 2, 3])]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
# unicode
df.columns = [u'foo', u'bar']
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
if PY3:
# bytes on PY3, on PY2 these are str
df.columns = [b'foo', b'bar']
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1)]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli'])
def test_compression(self, engine, compression):
if compression == 'snappy':
pytest.importorskip('snappy')
elif compression == 'brotli':
pytest.importorskip('brotli')
df = pd.DataFrame({'A': [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={'compression': compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4))})
expected = pd.DataFrame({'string': list('abc')})
check_round_trip(df, engine, expected=expected,
read_kwargs={'columns': ['string']})
def test_write_index(self, engine):
check_names = engine != 'fastparquet'
if engine == 'pyarrow':
import pyarrow
if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
pytest.skip("pyarrow is < 0.7.0")
df = pd.DataFrame({'A': [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range('20130101', periods=3),
list('abc'),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = 'foo'
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
        # Not supported in fastparquet as of 0.1.3 or with older pyarrow versions
engine = pa
df = pd.DataFrame({'A': [1, 2, 3]})
index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
df.index = index
check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS')
df = pd.DataFrame(np.random.randn(2 * len(dates), 3),
columns=list('ABC'))
index1 = pd.MultiIndex.from_product(
[['Level1', 'Level2'], dates],
names=['level', 'date'])
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(df, engine, read_kwargs={'columns': ['A', 'B']},
expected=df[['A', 'B']])
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['q', 'r', 's']})
write_kwargs = {
'compression': None,
'index': False,
}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
# Ignore custom index
df = pd.DataFrame({'a': [1, 2, 3], 'b': ['q', 'r', 's']},
index=['zyx', 'wvu', 'tsr'])
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
# Ignore multi-indexes as well.
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = pd.DataFrame({'one': [i for i in range(8)],
'two': [-i for i in range(8)]}, index=arrays)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs,
expected=expected)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
import pyarrow
if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='Europe/Brussels')
df['bool_with_none'] = [True, None, True]
check_round_trip(df, pa)
# TODO: This doesn't fail on all systems; track down which
@pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)")
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='Europe/Brussels')
check_round_trip(df, pa, expected=df[['string', 'int']],
read_kwargs={'columns': ['string', 'int']})
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, pa, ValueError)
@pytest.mark.xfail(reason="failing for pyarrow < 0.11.0")
def test_unsupported(self, pa):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
# timedelta
df = pd.DataFrame({'a': pd.timedelta_range('1 day',
periods=3)})
self.check_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({'a': ['a', 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
# de-serialized as object
expected = df.assign(a=df.a.astype(object))
check_round_trip(df, pa, expected=expected)
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa,
path='s3://pandas-test/pyarrow.parquet')
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ['bool', 'int']
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols,
compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 2
assert dataset.partitions.partition_names == set(partition_cols)
class TestParquetFastParquet(Base):
def test_basic(self, fp, df_full):
df = df_full
# additional supported types for fastparquet
if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
df['datetime_tz'] = pd.date_range('20130101', periods=3,
tz='US/Eastern')
df['timedelta'] = pd.timedelta_range('1 day', periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, fp, ValueError)
def test_bool_with_none(self, fp):
df = pd.DataFrame({'a': [True, None, False]})
expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16')
check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self.check_error_on_write(df, fp, ValueError)
# mixed
df = pd.DataFrame({'a': ['a', 1, 2.0]})
self.check_error_on_write(df, fp, ValueError)
def test_categorical(self, fp):
if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"):
pytest.skip("CategoricalDtype not supported for older fp")
df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
check_round_trip(df, fp)
def test_datetime_tz(self, fp_lt_014):
# fastparquet<0.1.4 doesn't preserve tz
df = pd.DataFrame({'a': pd.date_range('20130101', periods=3,
tz='US/Eastern')})
# warns on the coercion
with catch_warnings(record=True):
check_round_trip(df, fp_lt_014,
expected=df.astype('datetime64[ns]'))
def test_filter_row_groups(self, fp):
d = {'a': list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None,
row_group_offsets=1)
result = read_parquet(path, fp, filters=[('a', '==', 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp,
path='s3://pandas-test/fastparquet.parquet')
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
partition_cols = ['bool', 'int']
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, engine="fastparquet",
partition_cols=partition_cols, compression=None)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_partition_on_supported(self, fp, df_full):
# GH #23283
partition_cols = ['bool', 'int']
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, engine="fastparquet", compression=None,
partition_on=partition_cols)
assert os.path.exists(path)
import fastparquet
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
# GH #23283
partition_cols = ['bool', 'int']
df = df_full
with pytest.raises(ValueError):
with tm.ensure_clean_dir() as path:
df.to_parquet(path, engine="fastparquet", compression=None,
partition_on=partition_cols,
partition_cols=partition_cols)
| bsd-3-clause |
SnappyDataInc/spark | python/pyspark/sql/context.py | 6 | 23638 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given, it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
madjelan/CostSensitiveClassification | costcla/sampling/_smote.py | 1 | 4876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://raw.githubusercontent.com/blacklab/nyan/master/shared_modules/_smote.py
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on 24.11.2012
@author: karsten jeschkies <[email protected]>
This is an implementation of the SMOTE Algorithm.
See: "SMOTE: synthetic minority over-sampling technique" by
Chawla, N.V. et al.
'''
import logging
import numpy as np
from random import randrange, choice
from sklearn.neighbors import NearestNeighbors
logger = logging.getLogger("main")
def _SMOTE(T, N, k, h = 1.0):
"""
Returns (N/100) * n_minority_samples synthetic minority samples.
Parameters
----------
T : array-like, shape = [n_minority_samples, n_features]
Holds the minority samples
    N : percentage of new synthetic samples:
n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
k : int. Number of nearest neighbours.
Returns
-------
S : Synthetic samples. array,
shape = [(N/100) * n_minority_samples, n_features].
"""
n_minority_samples, n_features = T.shape
if N < 100:
#create synthetic samples only for a subset of T.
        #TODO: select random minority samples
N = 100
pass
if (N % 100) != 0:
raise ValueError("N must be < 100 or multiple of 100")
N = N/100
n_synthetic_samples = N * n_minority_samples
S = np.zeros(shape=(n_synthetic_samples, n_features))
#Learn nearest neighbours
neigh = NearestNeighbors(n_neighbors = k)
neigh.fit(T)
#Calculate synthetic samples
for i in xrange(n_minority_samples):
nn = neigh.kneighbors(T[i], return_distance=False)
for n in xrange(N):
nn_index = choice(nn[0])
#NOTE: nn includes T[i], we don't want to select it
while nn_index == i:
nn_index = choice(nn[0])
dif = T[nn_index] - T[i]
gap = np.random.uniform(low = 0.0, high = h)
S[n + i * N, :] = T[i,:] + gap * dif[:]
return S
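# Hedged usage sketch (not part of the original module): a minimal call to
# _SMOTE on a toy minority-class matrix. The helper name and toy values are
# illustrative assumptions; like the rest of this module it assumes a
# Python 2 era environment (xrange, old sklearn kneighbors semantics).
def _smote_usage_example():
    # 4 minority samples with 2 features; N=200 asks for 2 synthetic samples
    # per original sample, so the result has shape (8, 2).
    T = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9], [1.2, 2.2]])
    return _SMOTE(T, N=200, k=3, h=1.0)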
def _borderlineSMOTE(X, y, minority_target, N, k):
"""
Returns synthetic minority samples.
Parameters
----------
X : array-like, shape = [n__samples, n_features]
Holds the minority and majority samples
y : array-like, shape = [n__samples]
Holds the class targets for samples
minority_target : value for minority class
    N : percentage of new synthetic samples:
n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
k : int. Number of nearest neighbours.
    h : high value in random.uniform used to scale the difference for synthetic samples
Returns
-------
safe : Safe minorities
synthetic : Synthetic sample of minorities in danger zone
danger : Minorities of danger zone
"""
n_samples, _ = X.shape
#Learn nearest neighbours on complete training set
neigh = NearestNeighbors(n_neighbors = k)
neigh.fit(X)
safe_minority_indices = list()
danger_minority_indices = list()
for i in xrange(n_samples):
if y[i] != minority_target: continue
nn = neigh.kneighbors(X[i], return_distance=False)
majority_neighbours = 0
for n in nn[0]:
if y[n] != minority_target:
majority_neighbours += 1
        # nn has shape (1, k): compare against the number of neighbours,
        # len(nn[0]), rather than len(nn), which is always 1.
        if majority_neighbours == len(nn[0]):
            continue
        elif majority_neighbours < (len(nn[0])/2):
logger.debug("Add sample to safe minorities.")
safe_minority_indices.append(i)
else:
#DANGER zone
danger_minority_indices.append(i)
#SMOTE danger minority samples
synthetic_samples = _SMOTE(X[danger_minority_indices], N, k, h = 0.5)
return (X[safe_minority_indices],
synthetic_samples,
X[danger_minority_indices])
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/join/cosine_join_py.py | 1 | 12275 | # cosine join
from joblib import delayed, Parallel
import pandas as pd
from py_stringsimjoin.join.set_sim_join import set_sim_join
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
get_attrs_to_project, get_num_processes_to_launch, remove_redundant_attrs, \
split_table
from py_stringsimjoin.utils.missing_value_handler import \
get_pairs_with_missing_value
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_comp_op_for_sim_measure, validate_key_attr, \
validate_input_table, validate_threshold, validate_tokenizer, \
validate_output_attrs
def cosine_join_py(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op='>=',
allow_empty=True, allow_missing=False,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=True, n_jobs=1, show_progress=True):
"""Join two tables using a variant of cosine similarity known as Ochiai
coefficient.
This is not the cosine measure that computes the cosine of the angle
between two given vectors. Rather, it is a variant of cosine measure known
as Ochiai coefficient (see the Wikipedia page
`Cosine Similarity <https://en.wikipedia.org/wiki/Cosine_similarity>`_).
Specifically, for two sets X and Y, this measure computes:
:math:`cosine(X, Y) = \\frac{|X \\cap Y|}{\\sqrt{|X| \\cdot |Y|}}`
In the case where one of X and Y is an empty set and the other is a
non-empty set, we define their cosine score to be 0. In the case where both
X and Y are empty sets, we define their cosine score to be 1.
Finds tuple pairs from left table and right table such that the cosine
similarity between the join attributes satisfies the condition on input
threshold. For example, if the comparison operator is '>=', finds tuple
pairs whose cosine similarity between the strings that are the values of
the join attributes is greater than or equal to the input threshold, as
specified in "threshold".
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_join_attr (string): join attribute in left table.
r_join_attr (string): join attribute in right table.
tokenizer (Tokenizer): tokenizer to be used to tokenize join
attributes.
threshold (float): cosine similarity threshold to be satisfied.
comp_op (string): comparison operator. Supported values are '>=', '>'
and '=' (defaults to '>=').
allow_empty (boolean): flag to indicate whether tuple pairs with empty
set of tokens in both the join attributes should be included in the
output (defaults to True).
allow_missing (boolean): flag to indicate whether tuple pairs with
missing value in at least one of the join attributes should be
included in the output (defaults to False). If this flag is set to
True, a tuple in ltable with missing value in the join attribute
will be matched with every tuple in rtable and vice versa.
l_out_attrs (list): list of attribute names from the left table to be
included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to be
included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names coming
from the left table, in the output table (defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names coming
from the right table, in the output table (defaults to 'r\_').
out_sim_score (boolean): flag to indicate whether similarity score
should be included in the output table (defaults to True). Setting
this flag to True will add a column named '_sim_score' in the
output table. This column will contain the similarity scores for the
tuple pairs in the output.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
(where n_cpus is the total number of CPUs in the machine). Thus for
n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
becomes less than 1, then no parallel computing code will be used
(i.e., equivalent to the default).
show_progress (boolean): flag to indicate whether task progress should
be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that satisfy the join
condition (DataFrame).
"""
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes and join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_join_attr, ltable.columns,
'join attribute', 'left table')
validate_attr(r_join_attr, rtable.columns,
'join attribute', 'right table')
# check if the join attributes are not of numeric type
validate_attr_type(l_join_attr, ltable[l_join_attr].dtype,
'join attribute', 'left table')
validate_attr_type(r_join_attr, rtable[r_join_attr].dtype,
'join attribute', 'right table')
# check if the input tokenizer is valid
validate_tokenizer(tokenizer)
# check if the input threshold is valid
validate_threshold(threshold, 'COSINE')
# check if the comparison operator is valid
validate_comp_op_for_sim_measure(comp_op, 'COSINE')
# check if the output attributes exist
validate_output_attrs(l_out_attrs, ltable.columns,
r_out_attrs, rtable.columns)
# check if the key attributes are unique and do not contain missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# set return_set flag of tokenizer to be True, in case it is set to False
revert_tokenizer_return_set_flag = False
if not tokenizer.get_return_set():
tokenizer.set_return_set(True)
revert_tokenizer_return_set_flag = True
# remove redundant attrs from output attrs.
l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr)
r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr)
# get attributes to project.
l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr)
r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr)
# Do a projection on the input dataframes to keep only the required
# attributes. Then, remove rows with missing value in join attribute from
# the input dataframes. Then, convert the resulting dataframes into ndarray.
ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr)
rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr)
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = set_sim_join(ltable_array, rtable_array,
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, 'COSINE',
threshold, comp_op, allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
else:
# if n_jobs is above 1, split the right table into n_jobs splits and
# join each right table split with the whole of left table in a separate
# process.
r_splits = split_table(rtable_array, n_jobs)
results = Parallel(n_jobs=n_jobs)(delayed(set_sim_join)(
ltable_array, r_splits[job_index],
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, 'COSINE',
threshold, comp_op, allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
# If allow_missing flag is set, then compute all pairs with missing value in
# at least one of the join attributes and then add it to the output
# obtained from the join.
if allow_missing:
missing_pairs = get_pairs_with_missing_value(
ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
output_table = pd.concat([output_table, missing_pairs])
# add an id column named '_id' to the output table.
output_table.insert(0, '_id', range(0, len(output_table)))
# revert the return_set flag of tokenizer, in case it was modified.
if revert_tokenizer_return_set_flag:
tokenizer.set_return_set(False)
return output_table
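# Hedged usage sketch (not part of the original module). The tokenizer import
# assumes py_stringmatching's WhitespaceTokenizer, which is the tokenizer
# family this package is normally paired with; the toy tables, column names
# and the 0.5 threshold are illustrative assumptions only.
def _cosine_join_usage_example():
    from py_stringmatching import WhitespaceTokenizer
    ltable = pd.DataFrame({'id': [1, 2],
                           'name': ['data science', 'string join']})
    rtable = pd.DataFrame({'id': [1, 2],
                           'name': ['data sciences', 'joining strings']})
    ws = WhitespaceTokenizer(return_set=True)
    # keep pairs whose whitespace-token cosine (Ochiai) score is >= 0.5
    return cosine_join_py(ltable, rtable, 'id', 'id', 'name', 'name',
                          ws, 0.5, show_progress=False)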
| bsd-3-clause |
depet/scikit-learn | sklearn/gpml/lik.py | 1 | 14213 | import numpy
import scipy.special
def gauss(hyp, y=None, mu=None, s2=None, inf=None, hi=None, nargout=None):
"""
Gaussian likelihood function for regression. The expression for the
likelihood is
    likGauss(t) = exp(-(t-y)^2/(2*sn^2)) / sqrt(2*pi*sn^2),
where y is the mean and sn is the standard deviation.
The hyperparameters are:
hyp = [ log(sn) ]
Several modes are provided, for computing likelihoods, derivatives and moments
respectively, see likFunctions.m for the details. In general, care is taken
to avoid numerical issues when the arguments are extreme.
"""
if mu is None:
return '1'
sn2 = numpy.exp(2*hyp)
if inf is None:
if numpy.size(y) == 0:
y = numpy.zeros(numpy.shape(mu))
if s2 is not None and numpy.linalg.norm(s2) > 0: # s2==0?
out = gauss(hyp, y, mu, s2, 'ep')
lp = out[0]
else:
lp = -(y-mu)**2/sn2/2-numpy.log(2*numpy.pi*sn2)/2
s2 = 0
if nargout == 1:
return lp
elif nargout == 2:
return (mu, mu)
else:
return (lp, mu, s2 + sn2)
else:
if inf == 'laplace':
if hi is None:
if nargout is None:
nargout = 4
if numpy.size(y) == 0:
y = 0
ymmu = y-mu
lp = -numpy.power(ymmu,2)/(2*sn2) - numpy.log(2*numpy.pi*sn2)/2
res = lp
if nargout > 1:
dlp = ymmu/sn2
res = (lp, dlp)
if nargout > 2:
d2lp = -numpy.ones(numpy.shape(ymmu))/sn2
res += (d2lp,)
if nargout > 3:
d3lp = numpy.zeros(numpy.shape(ymmu))
                    res += (d3lp,)
else:
if nargout is None:
nargout = 3
lp_dhyp = numpy.power(y-mu,2)/sn2 - 1
                res = lp_dhyp
                if nargout > 1:
                    dlp_dhyp = 2*(mu-y)/sn2
                    res = (lp_dhyp, dlp_dhyp)
if nargout > 2:
d2lp_dhyp = 2*numpy.ones(numpy.shape(mu))/sn2
res += (d2lp_dhyp,)
return res
elif inf == 'ep':
if hi is None:
if nargout is None:
nargout = 3
lZ = -(y-mu)**2/(sn2+s2)/2 - numpy.log(2*numpy.pi*(sn2+s2))/2
dlZ = (y-mu)/(sn2+s2)
d2lZ = -1./(sn2+s2)
if nargout == 1:
return lZ
elif nargout == 2:
return (lZ, dlZ)
else:
return (lZ, dlZ, d2lZ)
else:
if nargout is None:
nargout = 1
dlZhyp = ((y-mu)**2/(sn2+s2)-1)/(1+s2/sn2)
if nargout == 1:
return dlZhyp
else:
res = (dlZhyp,)
for i in range(2,nargout):
res += (None,)
return res
# elif inf == 'infVB':
# if hi is None:
# # variational lower site bound
# # t(s) = exp(-(y-s)^2/2sn2)/sqrt(2*pi*sn2)
# # the bound has the form: b*s - s.^2/(2*ga) - h(ga)/2 with b=y/ga
# ga = s2
# n = numel(ga)
# b = y./ga
# y = y.*ones(n,1)
# db = -y./ga.^2
# d2b = 2*y./ga.^3
# h = zeros(n,1)
# dh = h
# d2h = h
# id = ga(:)<=sn2+1e-8
# h(id) = y(id).^2./ga(id) + log(2*pi*sn2)
# h(~id) = Inf
# dh(id) = -y(id).^2./ga(id).^2
# d2h(id) = 2*y(id).^2./ga(id).^3
# id = ga<0
# h(id) = numpy.inf
# dh(id) = 0
# d2h(id) = 0
# return (h, b, dh, db, d2h, d2b)
# else:
# ga = s2
# n = numel(ga)
# dhhyp = zeros(n,1)
# dhhyp(ga(:)<=sn2) = 2
# dhhyp(ga<0) = 0
# return (dhhyp,)
else:
raise AttributeError('Unknown inference')
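# Hedged usage sketch (not part of the original module): a prediction-mode
# call of the Gaussian likelihood. hyp is log(sn); with latent test means mu
# and variances s2 the call returns log predictive probabilities, predictive
# means and predictive variances (s2 + sn^2). All values are illustrative.
def _gauss_usage_example():
    hyp = numpy.log(0.1)                # noise standard deviation sn = 0.1
    y = numpy.array([[0.4], [1.2]])     # test targets
    mu = numpy.array([[0.5], [1.0]])    # latent test means
    s2 = numpy.array([[0.01], [0.04]])  # latent test variances
    lp, ymu, ys2 = gauss(hyp, y, mu, s2)
    return lp, ymu, ys2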
def erf(hyp, y=None, mu=None, s2=None, inf=None, hi=None, nargout=None):
"""
Error function or cumulative Gaussian likelihood function for binary
classification or probit regression. The expression for the likelihood is
likErf(t) = (1+erf(t/sqrt(2)))/2 = normcdf(t).
    Several modes are provided for computing likelihoods, derivatives and moments
respectively. In general, care is taken to avoid numerical issues when the
arguments are extreme.
"""
if mu is None:
return '0'
if y is not None:
if numpy.size(y) == 0:
y = numpy.array([[1]])
else:
y = numpy.sign(y)
y[y==0] = 1
else:
y = numpy.array([[1]])
# prediction mode if inf is not present
if inf is None:
y = y*numpy.ones(numpy.shape(mu))
if s2 is not None and numpy.linalg.norm(s2) > 0: # s2==0?
lp = erf(hyp, y, mu, s2, 'ep', nargout=1)
p = numpy.exp(lp)
else:
p, lp = __cumGauss(y,mu,nargout=2)
if nargout is None:
nargout = 3
res = lp
if nargout > 1:
ymu = 2*p-1
res = (lp, ymu)
if nargout > 2:
ys2 = 4*p*(1-p)
res += (ys2,)
return res
else:
# TODO: TEST
if inf == 'laplace':
# no derivative mode
if hi is None:
f = mu
yf = y*f # product latents and labels
p, lp = __cumGauss(y, f, nargout=2)
res = lp
# derivative of log likelihood
if nargout > 1:
n_p = __gauOverCumGauss(yf, p)
dlp = y*n_p # derivative of log likelihood
res = (lp, dlp)
# 2nd derivative of log likelihood
if nargout > 2:
d2lp = -numpy.power(n_p,2) - yf*n_p
res += (d2lp,)
# 3rd derivative of log likelihood
if nargout > 3:
d3lp = 2*y*numpy.power(n_p,3) + 3*f*numpy.power(n_p,2) + y*(numpy.power(f,2)-1)*n_p
res += (d3lp,)
return res
# derivative mode
else:
return numpy.array([[]])
elif inf == 'ep':
if hi is None:
if nargout is None:
nargout = 3
z = mu/numpy.sqrt(1+s2)
# log part function
junk, lZ = __cumGauss(y,z,nargout=2)
res = lZ
if numpy.size(y) > 0:
z = z*y
if nargout > 1:
if numpy.size(y) == 0:
y = 1
n_p = __gauOverCumGauss(z,numpy.exp(lZ))
# 1st derivative wrt mean
dlZ = y*n_p/numpy.sqrt(1+s2)
res = (lZ,dlZ)
if nargout > 2:
# 2nd derivative wrt mean
d2lZ = -n_p*(z+n_p)/(1+s2)
res += (d2lZ,)
return res
else:
return numpy.array([[]])
elif inf == 'vb':
a = 0
else:
raise AttributeError('Unknown inference')
def __cumGauss(y, f, nargout=1):
# product of latents and labels
if numpy.size(y) > 0:
yf = y*f
else:
yf = f
# likelihood
p = (1+scipy.special.erf(yf/numpy.sqrt(2)))/2
res = p
# log likelihood
if nargout > 1:
lp = __logphi(yf,p)
res = (p,lp)
return res
def __logphi(z, p):
"""
safe implementation of the log of phi(x) = \int_{-\infty}^x N(f|0,1) df
logphi(z) = log(normcdf(z))
"""
lp = numpy.zeros(numpy.shape(z))
zmin = -6.2
zmax = -5.5
ok = z > zmax
bd = z < zmin
# interpolate between both of them
ip = ~ok & ~bd
# interpolate weights
lam = 1/(1+numpy.exp(25*(1/2-(z[ip]-zmin)/(zmax-zmin))))
lp[ok] = numpy.log(p[ok])
    # use lower and upper bound according to Abramowitz & Stegun 7.1.13 for z<0
# lower -log(pi)/2 -z.^2/2 -log( sqrt(z.^2/2+2 ) -z/sqrt(2) )
# upper -log(pi)/2 -z.^2/2 -log( sqrt(z.^2/2+4/pi) -z/sqrt(2) )
# the lower bound captures the asymptotics
lp[~ok] = -numpy.log(numpy.pi)/2 -numpy.power(z[~ok],2)/2 - numpy.log(numpy.sqrt(numpy.power(z[~ok],2)/2+2)-z[~ok]/numpy.sqrt(2))
lp[ip] = (1-lam)*lp[ip] + lam*numpy.log(p[ip])
return lp
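# Hedged sanity-check sketch (not part of the original module): __logphi stays
# finite for strongly negative z, where a naive numpy.log(p) underflows to
# -inf; the sample z values below are illustrative assumptions.
def _logphi_usage_example():
    z = numpy.array([[-40.0, -6.0, 0.0, 3.0]])
    p = (1 + scipy.special.erf(z / numpy.sqrt(2))) / 2
    return __logphi(z, p)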
def __gauOverCumGauss(f, p):
"""
Safely compute Gaussian over cumulative Gaussian.
"""
n_p = numpy.zeros(numpy.shape(f))
# naive evaluation for large values of f
ok = f>-5
n_p[ok] = (numpy.exp(-numpy.power(f[ok],2)/2)/numpy.sqrt(2*numpy.pi)) / p[ok]
# tight upper bound evaluation
bd = f < -6
n_p[bd] = numpy.sqrt(numpy.power(f[bd],2)/4+1)-f[bd]/2
# linearly interpolate between both of them
interp = ~ok & ~bd
tmp = f[interp]
lam = -5-f[interp]
n_p[interp] = (1-lam)*(numpy.exp(-numpy.power(tmp,2)/2)/numpy.sqrt(2*numpy.pi))/p[interp] + lam*(numpy.sqrt(numpy.power(tmp,2)/4+1)-tmp/2)
return n_p
def logistic(hyp, y=None, mu=None, s2=None, inf=None, hi=None, nargout=None):
"""
Logistic function for binary classification or logit regression.
The expression for the likelihood is
logistic(t) = 1/(1+exp(-t)).
    Several modes are provided for computing likelihoods, derivatives and moments
respectively. In general, care is taken to avoid numerical issues when the
arguments are extreme. The moments \int f^k logistic(y,f) N(f|mu,var) df
are calculated via a cumulative Gaussian scale mixture approximation.
"""
if mu is None:
return '0'
if y is not None:
if numpy.size(y) == 0:
y = numpy.array([[1]])
else:
y = numpy.sign(y)
y[y==0] = 1
else:
y = numpy.array([[1]])
# prediction mode if inf is not present
if inf is None:
y = y*numpy.ones(numpy.shape(mu))
if s2 is not None and numpy.linalg.norm(s2) > 0: # s2==0?
lp = logistic(hyp, y, mu, s2, 'ep', nargout=1)
else:
yf = y*mu
lp = yf.copy()
ok = -35<yf
lp[ok] = -numpy.log(1+numpy.exp(-yf[ok]))
if nargout is None:
nargout = 3
res = lp
if nargout > 1:
p = numpy.exp(lp)
ymu = 2*p-1
res = (lp, ymu)
if nargout > 2:
ys2 = 4*p*(1-p)
res += (ys2,)
return res
else:
# TODO: TEST
if inf == 'laplace':
# no derivative mode
if hi is None:
# product latents and labels
f = mu
yf = y*f
s = -yf
ps = numpy.maximum(0,s)
# lp = -(log(1+exp(s)))
lp = -(ps+numpy.log(numpy.exp(-ps) + numpy.exp(s-ps)))
res = lp
# first derivatives
if nargout > 1:
s = numpy.minimum(0,f)
p = numpy.exp(s)/(numpy.exp(s) + numpy.exp(s-f)) # p = 1./(1+exp(-f))
dlp = (y+1)/2.-p # derivative of log likelihood
res = (lp,dlp)
# 2nd derivative of log likelihood
if nargout > 2:
d2lp = -numpy.exp(2*s-f)/numpy.power(numpy.exp(s)+numpy.exp(s-f),2)
res += (d2lp,)
# 3rd derivative of log likelihood
if nargout > 3:
d3lp = 2*d2lp*(0.5-p)
                    res += (d3lp,)
return res
# derivative mode
else:
return numpy.array([[]])
elif inf == 'ep':
if hi is None:
if nargout is None:
nargout = 3
y = y*numpy.ones(numpy.shape(mu))
# likLogistic(t) \approx 1/2 + \sum_{i=1}^5 (c_i/2) erf(lam_i/sqrt(2)t)
# approx coeffs lam_i and c_i
lam = numpy.sqrt(2)*numpy.array([[0.44, 0.41, 0.40, 0.39, 0.36]])
c = numpy.array([[1.146480988574439e+02, -1.508871030070582e+03, 2.676085036831241e+03, -1.356294962039222e+03, 7.543285642111850e+01]]).T
lZc, dlZc, d2lZc = erf({'cov': numpy.array([[]]), 'lik': numpy.array([[]]), 'mean': numpy.array([[]])}, numpy.dot(y,numpy.ones((1,5))), numpy.dot(mu,lam), numpy.dot(s2,numpy.power(lam,2)), inf, nargout=3)
# A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
lZ = __log_expA_x(lZc,c)
# ((exp(A).*B)*d)./(exp(A)*c)
dlZ = __expABz_expAx(lZc, c, dlZc, c*lam.T)
# d2lZ = ((exp(A).*Z)*e)./(exp(A)*c) - dlZ.^2 where e = c.*(lam.^2)'
d2lZ = __expABz_expAx(lZc, c, numpy.power(dlZc,2)+d2lZc, c*numpy.power(lam,2).T) - numpy.power(dlZ,2)
# The scale mixture approximation does not capture the correct asymptotic
# behavior; we have linear decay instead of quadratic decay as suggested
# by the scale mixture approximation. By observing that for large values
# of -f*y ln(p(y|f)) of logistic likelihood is linear in f with slope y,
# we are able to analytically integrate the tail region; there is no
# contribution to the second derivative
# empirically determined bound at val==0
val = numpy.abs(mu)-196./200.*s2-4.
# interpolation weights
lam = 1/(1+numpy.exp(-10*val))
# apply the same to p(y|f) = 1 - p(-y|f)
lZtail = numpy.minimum(s2/2-numpy.abs(mu),-0.1)
dlZtail = -numpy.sign(mu)
id = y*mu>0
# label and mean agree
lZtail[id] = numpy.log(1-numpy.exp(lZtail[id]))
dlZtail[id] = 0
# interpolate between scale mixture ..
lZ = (1-lam)*lZ + lam*lZtail
# .. and tail approximation
dlZ = (1-lam)*dlZ + lam*dlZtail
res = lZ
if nargout > 1:
res = (lZ,dlZ)
if nargout > 2:
res += (d2lZ,)
return res
else:
return numpy.array([[]])
elif inf == 'vb':
a = 0
else:
raise AttributeError('Unknown inference')
def __log_expA_x(A,x):
"""
Computes y = log( exp(A)*x ) in a numerically safe way by subtracting the
maximal value in each row to avoid cancelation after taking the exp.
"""
N = numpy.size(A,1)
# number of columns, max over columns
maxA = numpy.reshape(numpy.max(A,1),(-1,1))
# exp(A) = exp(A-max(A))*exp(max(A))
return numpy.log(numpy.dot(numpy.exp(A-numpy.dot(maxA,numpy.ones((1,N)))),x)) + maxA
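# Hedged sanity-check sketch (not part of the original module): for moderate
# values __log_expA_x(A, x) matches numpy.log(numpy.dot(numpy.exp(A), x)), and
# it also stays stable when a row of A holds entries large enough to overflow
# a plain exp(); the sample numbers are illustrative assumptions.
def _log_expA_x_usage_example():
    A = numpy.array([[1.0, 2.0, 3.0], [700.0, 701.0, 702.0]])
    x = numpy.array([[0.2], [0.3], [0.5]])
    return __log_expA_x(A, x)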
def __expABz_expAx(A,x,B,z):
"""
Computes y = ( (exp(A).*B)*z ) ./ ( exp(A)*x ) in a numerically safe way
The function is not general in the sense that it yields correct values for
all types of inputs. We assume that the values are close together.
"""
# number of columns, max over columns
N = numpy.size(A,1)
maxA = numpy.reshape(numpy.max(A,1),(-1,1))
# subtract maximum value
A = A - numpy.dot(maxA,numpy.ones((1,N)))
return numpy.dot(numpy.exp(A)*B,z) / numpy.dot(numpy.exp(A),x)
def mix():
raise NotImplementedError('')
# Evaluates lik functions
def feval(fun, hyp=None, y=None, mu=None, s2=None, inff=None, hi=None, nargout=None):
if not isinstance(fun, tuple):
fun = (fun,)
f = fun[0]
if f.__module__ == 'sklearn.gpml.lik':
        # 'mix' is defined at module level here; there is no 'lik' name bound
        # in this module, so compare against 'mix' directly.
        if len(fun) > 1 and f == mix:
return f(fun[1], hyp, y, mu, s2, inff, hi, nargout)
else:
return f(hyp, y, mu, s2, inff, hi, nargout)
else:
raise AttributeError('Unknown function')
| bsd-3-clause |
felipebetancur/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
PeterRochford/SkillMetrics | skill_metrics/overlay_target_diagram_circles.py | 1 | 2705 | import matplotlib.pyplot as plt
import numpy as np
def overlay_target_diagram_circles(option):
'''
Overlays circle contours on a target diagram.
Plots circle contours on a target diagram to indicate standard
deviation ranges and observational uncertainty threshold.
INPUTS:
option : dictionary containing option values. (Refer to
GET_TARGET_DIAGRAM_OPTIONS function for more information.)
option['axismax'] : maximum for the X & Y values. Used to set
default circles when no contours specified
option['circles'] : radii of circles to draw to indicate isopleths
of standard deviation
option['circleLineSpec'] : circle line specification (default dashed
black, '--k')
option['normalized'] : statistics supplied are normalized with
respect to the standard deviation of reference values
option['obsUncertainty'] : Observational Uncertainty (default of 0)
OUTPUTS:
None.
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
'''
theta = np.arange(0, 2*np.pi, 0.01)
unit = np.ones(len(theta))
# 1 - reference circle if normalized
if option['normalized'] == 'on':
rho = unit
X, Y = pol2cart(theta, rho)
        plt.plot(X, Y, 'k', linewidth=option['circlelinewidth'])
# Set range for target circles
if option['normalized'] == 'on':
circles = [.5, 1]
else:
if len(option['circles']) > 0:
circles = np.asarray(option['circles'])
index = np.where(circles <= option['axismax'])
circles = [option['circles'][i] for i in index[0]]
else:
circles = [option['axismax'] * x for x in [.7, 1]]
# 2 - secondary circles
for c in circles:
rho = c * unit
X, Y = pol2cart(theta, rho)
plt.plot(X, Y, option['circlelinespec'],
linewidth=option['circlelinewidth'])
# 3 - Observational Uncertainty threshold
if option['obsuncertainty'] > 0:
rho = option['obsuncertainty'] * unit
X, Y = pol2cart(theta, rho)
plt.plot(X, Y, '--b')
def pol2cart(phi, rho):
'''
Transforms corresponding elements of polar coordinate arrays to
Cartesian coordinates.
INPUTS:
phi : polar angle counter-clockwise from x-axis in radians
rho : radius
OUTPUTS:
x : Cartesian x-coordinate
y : Cartesian y-coordinate
'''
x = np.multiply(rho, np.cos(phi))
y = np.multiply(rho, np.sin(phi))
return x, y
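# Hedged usage sketch (not part of the original module): pol2cart maps the
# polar samples used for the circle contours above to Cartesian plotting
# coordinates; the three angles below are illustrative assumptions.
def _pol2cart_usage_example():
    phi = np.array([0.0, np.pi / 2, np.pi])
    rho = np.ones(3)
    x, y = pol2cart(phi, rho)  # x ~ [1, 0, -1], y ~ [0, 1, 0]
    return x, y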
| gpl-3.0 |
pompiduskus/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
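# Illustrative sketch added for documentation purposes; it is not part of the
# original conf.py. sphinx.ext.linkcode looks for a module-level function
# `linkcode_resolve(domain, info)` and expects a URL string or None;
# `make_linkcode_resolve` above builds such a hook bound to the scikit-learn
# GitHub tree. A minimal hand-written equivalent (with a simplified, made-up
# URL scheme) could look like this:
def _example_linkcode_resolve(domain, info):
    if domain != 'py' or not info.get('module'):
        return None
    path = info['module'].replace('.', '/')
    return ('https://github.com/scikit-learn/scikit-learn/'
            'blob/master/%s.py' % path)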
| bsd-3-clause |
xodus7/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops.py | 5 | 178391 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
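# Illustrative sketch added for documentation purposes; it is not part of the
# original module. `_safe_div` is applied to scalar running totals, so this
# hypothetical helper builds two scalar examples: a guarded division by zero
# that evaluates to 0.0, and an ordinary division that evaluates to 2.0.
def _example_safe_div():
  guarded = _safe_div(
      ops.convert_to_tensor(7.0), ops.convert_to_tensor(0.0), name='guarded')
  regular = _safe_div(
      ops.convert_to_tensor(6.0), ops.convert_to_tensor(3.0), name='regular')
  return guarded, regular  # evaluate to (0.0, 2.0) when run in a session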
@deprecated(None, 'Please switch to tf.metrics.true_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.true_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_positives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.false_negatives. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean')
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
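# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style graph/session workflow, and
# the values and weights below are made up. The zero weight masks the third
# value, so the running mean is (2 + 4 + 8) / 3.
def _example_streaming_mean():
  import tensorflow as tf  # local import: only needed for this sketch
  values = tf.constant([2.0, 4.0, 6.0, 8.0])
  weights = tf.constant([1.0, 1.0, 0.0, 1.0])
  mean, update_op = streaming_mean(values, weights=weights)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)    # total becomes 14.0, count becomes 3.0
    return sess.run(mean)  # ~4.6667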
@deprecated(None, 'Please switch to tf.metrics.mean_tensor')
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.accuracy. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
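# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up data:
# three of the four predictions match the labels, so a single update yields
# an accuracy of 0.75.
def _example_streaming_accuracy():
  import tensorflow as tf  # local import: only needed for this sketch
  predictions = tf.constant([1, 0, 1, 1])
  labels = tf.constant([1, 0, 0, 1])
  accuracy, update_op = streaming_accuracy(predictions, labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(accuracy)  # 0.75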
@deprecated(None, 'Please switch to tf.metrics.precision. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.recall. Note that the order '
'of the labels and predictions arguments has been switched.')
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
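# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up boolean
# data: two of the three positive predictions are correct (precision 2/3) and
# both actual positives are recovered (recall 1.0).
def _example_streaming_precision_recall():
  import tensorflow as tf  # local import: only needed for this sketch
  predictions = tf.constant([True, True, False, True])
  labels = tf.constant([True, False, False, True])
  precision, precision_update = streaming_precision(predictions, labels)
  recall, recall_update = streaming_recall(predictions, labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run([precision_update, recall_update])
    return sess.run([precision, recall])  # [~0.6667, 1.0]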
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
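# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up boolean
# data containing one false positive and one true negative, so the false
# positive rate is 1 / (1 + 1) = 0.5.
def _example_streaming_false_positive_rate():
  import tensorflow as tf  # local import: only needed for this sketch
  predictions = tf.constant([True, True, False, False])
  labels = tf.constant([False, True, False, True])
  fpr, update_op = streaming_false_positive_rate(predictions, labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(fpr)  # 0.5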
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
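# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up scores:
# with a single 0.5 threshold, only the two positive-labeled scores above 0.5
# count as true positives, so the returned vector is [2.0].
def _example_streaming_true_positives_at_thresholds():
  import tensorflow as tf  # local import: only needed for this sketch
  predictions = tf.constant([0.1, 0.6, 0.9, 0.4])
  labels = tf.constant([False, True, True, True])
  tp, update_op = streaming_true_positives_at_thresholds(
      predictions, labels, thresholds=[0.5])
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(tp)  # [2.0]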
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  that are used to compute the curve values. To discretize the curve, a
  linearly spaced set of thresholds is used to compute pairs of curve values
  (false positive rate and recall for ROC, recall and precision for PR).
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
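# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up scores.
# The returned tensor has shape [num_thresholds, 2]; for the default ROC
# curve each row holds a (false positive rate, recall) pair.
def _example_streaming_curve_points():
  import tensorflow as tf  # local import: only needed for this sketch
  labels = tf.constant([False, False, True, True])
  predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
  points, update_op = streaming_curve_points(
      labels=labels, predictions=predictions, num_thresholds=5)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(points)  # ndarray of shape (5, 2)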
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of '
'the labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
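# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and made-up scores
# whose exact ROC AUC is 0.75; the thresholded Riemann-sum approximation
# returns a value close to that.
def _example_streaming_auc():
  import tensorflow as tf  # local import: only needed for this sketch
  predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
  labels = tf.constant([False, False, True, True])
  auc, update_op = streaming_auc(predictions, labels)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(auc)  # approximately 0.75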
def _compute_dynamic_auc(labels, predictions, curve='ROC', weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
Operating Characteristic or 'PR' for the Precision-Recall curve.
weights: A 1-D `Tensor` of weights whose values are `float64`.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Compute the total weight and the total positive weight.
size = array_ops.size(predictions)
if weights is None:
weights = array_ops.ones_like(labels, dtype=dtypes.float64)
labels, predictions, weights = metrics_impl._remove_squeezable_dimensions(
labels, predictions, weights)
total_weight = math_ops.reduce_sum(weights)
total_positive = math_ops.reduce_sum(
array_ops.where(
math_ops.greater(labels, 0), weights,
array_ops.zeros_like(labels, dtype=dtypes.float64)))
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, keeping the same order for the
# corresponding labels and weights.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
true_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.greater(ordered_labels, 0), ordered_weights,
array_ops.zeros_like(ordered_labels,
dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
if curve == 'ROC':
      # Compute the weight of the negatives to the left of every split point
      # and the total weight of the negatives, which is needed to compute the
      # FPR.
false_positives = array_ops.gather(
array_ops.pad(
math_ops.cumsum(
array_ops.where(
math_ops.less(ordered_labels, 1), ordered_weights,
array_ops.zeros_like(
ordered_labels, dtype=dtypes.float64))),
paddings=[[1, 0]]), splits)
total_negative = total_weight - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
positives = array_ops.gather(
array_ops.pad(math_ops.cumsum(ordered_weights), paddings=[[1, 0]]),
splits)
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, positives),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, total_weight)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None,
weights=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions`, with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
weights: A 'Tensor' of non-negative weights whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
if weights is not None:
weights = array_ops.reshape(
math_ops.cast(weights, dtypes.float64), [-1])
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op = control_flow_ops.group(update_labels, update_preds,
update_weights)
else:
weights_accum = None
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(
labels_accum, preds_accum, curve=curve, weights=weights_accum)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
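# Illustrative usage sketch added for documentation purposes; it is not part
# of the original module. It assumes a TF1-style session and the same made-up
# batch as above; because the thresholds come from the data itself, the
# result is the exact ROC AUC of the accumulated values, 0.75 here.
def _example_streaming_dynamic_auc():
  import tensorflow as tf  # local import: only needed for this sketch
  labels = tf.constant([0, 0, 1, 1], dtype=tf.int64)
  predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
  auc, update_op = streaming_dynamic_auc(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(auc)  # 0.75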
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
concept of placement values for each labeled group, as presented by Delong and
Delong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
  # If all the labels are the same or the number of observations is too
  # small, AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  # If the estimate is exactly 1 or 0, no variance is present, so the
  # confidence interval collapses to the point estimate.
  # N.B. this can be misleading, since the number of observations may simply
  # be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
    alpha: Confidence level desired for the interval, in the range (0, 1).
    logit_transformation: A boolean value indicating whether the estimate should
      be logit transformed prior to calculating the confidence interval. Doing
      so keeps the confidence interval bounds within the interval [0, 1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
    auc: An `AucData` namedtuple containing the current area-under-curve, lower,
      and upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
    # Accumulate updates only when the total weight is at least 1; whether the
    # labels contain both classes is additionally checked via `is_valid` when
    # computing the AUC below.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
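# Example usage of `auc_with_confidence_intervals` (an illustrative sketch, not
# executed here; it assumes TF 1.x graph mode and that this function is
# exported as `tf.contrib.metrics.auc_with_confidence_intervals`):
#
#   import tensorflow as tf
#
#   labels = tf.constant([0, 0, 1, 1], dtype=tf.int64)
#   predictions = tf.constant([0.1, 0.4, 0.35, 0.8], dtype=tf.float64)
#   auc, update_op = tf.contrib.metrics.auc_with_confidence_intervals(
#       labels, predictions)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(auc))  # AucData(auc=0.75, lower=..., upper=...)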
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
    name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds. Types are same as that of predictions.
update_op: An op that accumulates values.
Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
      # It's important we aggregate using float64, since we're accumulating a
      # lot of 1.0's for the true/false labels, and accumulating to float32
      # would be quite inaccurate even with just a modest number of values
      # (~20M). We use float64 instead of an integer type primarily because the
      # GPU scatter kernel only supports floats.
agg_dtype = dtypes.float64
f_labels = math_ops.cast(labels, agg_dtype)
weights = math_ops.cast(weights, agg_dtype)
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
      #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
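      # As a hedged illustration: with num_thresholds = 5 we have width = 0.25
      # and thresholds = [0.0, 0.25, 0.5, 0.75, 1.0], so a prediction p = 0.6
      # maps to bucket_index = floor(0.6 * 4) = 2 and, after the reversed
      # cumulative sum, contributes to C(t) for thresholds 0.0, 0.25 and 0.5.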
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], agg_dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = ops.convert_to_tensor(1e-7, dtype=agg_dtype)
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
# Convert all tensors back to predictions' dtype (as per function contract).
out_dtype = predictions.dtype
_convert = lambda tensor: math_ops.cast(tensor, out_dtype)
result = PrecisionRecallData(
tp=_convert(tp),
fp=_convert(fp),
tn=_convert(tn),
fn=_convert(fn),
precision=_convert(precision),
recall=_convert(recall),
thresholds=_convert(math_ops.lin_space(0.0, 1.0, num_thresholds)))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
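# Example usage of `precision_recall_at_equal_thresholds` (an illustrative
# sketch, not executed here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.precision_recall_at_equal_thresholds` export path):
#
#   labels = tf.constant([True, False, True, False])
#   predictions = tf.constant([0.9, 0.6, 0.4, 0.1])
#   result, update_op = (
#       tf.contrib.metrics.precision_recall_at_equal_thresholds(
#           labels, predictions, num_thresholds=5))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     precision, recall = sess.run([result.precision, result.recall])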
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
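# Example usage of `streaming_specificity_at_sensitivity` (an illustrative
# sketch, not executed here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.streaming_specificity_at_sensitivity` export path):
#
#   predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
#   labels = tf.constant([False, False, True, True])
#   specificity, update_op = (
#       tf.contrib.metrics.streaming_specificity_at_sensitivity(
#           predictions, labels, sensitivity=0.5))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(specificity))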
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.precision_at_thresholds. Note that '
'the order of the labels and predictions arguments are switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
  The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various values
  of thresholds. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
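# Example usage of `streaming_false_positive_rate_at_thresholds` (an
# illustrative sketch, not executed here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.streaming_false_positive_rate_at_thresholds` export
# path):
#
#   predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
#   labels = tf.constant([False, False, True, True])
#   fpr, update_op = (
#       tf.contrib.metrics.streaming_false_positive_rate_at_thresholds(
#           predictions, labels, thresholds=[0.25, 0.5, 0.75]))
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(fpr))  # One false positive rate per threshold.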
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
  The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various values
  of thresholds. `false_negative_rate[i]` is defined as the total weight
  of values in `predictions` at or below `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
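# For example, _at_k_name('recall', 5) returns 'recall_at_5', and
# _at_k_name('precision', class_id=7) returns 'precision_at_k_class7'.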
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
  in the batch for which `class_id` is in `labels`, and computing the
  fraction of them for which `class_id` is also among the top `k` highest
  `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
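# Example usage of `streaming_sparse_recall_at_k` (an illustrative sketch, not
# executed here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.streaming_sparse_recall_at_k` export path):
#
#   # Two rows, three classes; class 2 is the top-1 prediction only for the
#   # first row, so recall@1 is 0.5.
#   predictions = tf.constant([[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]])
#   labels = tf.constant([[2], [2]], dtype=tf.int64)
#   recall, update_op = tf.contrib.metrics.streaming_sparse_recall_at_k(
#       predictions, labels, k=1)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))  # 0.5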
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name,
strict_mode=False):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
no smaller than the target precision, return the corresponding recall at
the threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
The recall at a given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
if not strict_mode:
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
else:
    # We aim to find the threshold where the precision is minimal but still no
    # smaller than the target precision.
    # The rationale:
    # 1. Compute the difference between the precisions (at the different
    #    thresholds) and the target precision.
    # 2. Take the reciprocal of the values from the above step. This makes the
    #    positive values rank before negative values, and smaller positives
    #    rank before larger positives.
tf_index = math_ops.argmax(
math_ops.div(1.0, precisions - precision + _EPSILON),
0,
output_type=dtypes.int32)
def _return_good_recall():
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
return control_flow_ops.cond(precisions[tf_index] >= precision,
_return_good_recall, lambda: .0)
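# A hedged illustration of `strict_mode` in `_compute_recall_at_precision`: if
# the precisions at the candidate thresholds are [0.5, 0.72, 0.9] and the
# target precision is 0.8, the non-strict mode returns the recall at the
# threshold whose precision is closest to 0.8 (here 0.72), while strict mode
# returns the recall at the threshold with the smallest precision that is
# still >= 0.8 (here 0.9), or 0 if no threshold reaches the target.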
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None,
strict_mode=False):
"""Computes `recall` at `precision`.
  The `recall_at_precision` function creates local variables,
  `tp` (true positives), `fp` (false positives) and `fn` (false negatives),
  that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
strict_mode: If true and there exists a threshold where the precision is
above the target precision, return the corresponding recall at the
threshold. Otherwise, return 0. If false, find the threshold where the
precision is closest to the target precision and return the recall at the
threshold.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value',
strict_mode)
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op', strict_mode)
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
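# Example usage of `recall_at_precision` (an illustrative sketch, not executed
# here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.recall_at_precision` export path):
#
#   labels = tf.constant([0, 0, 1, 1])
#   predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
#   recall, update_op = tf.contrib.metrics.recall_at_precision(
#       labels, predictions, precision=1.0)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(recall))  # Recall at the threshold whose precision is
#                              # closest to 1.0.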
def precision_at_recall(labels,
predictions,
target_recall,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision at a given recall.
This function creates variables to track the true positives, false positives,
true negatives, and false negatives at a set of thresholds. Among those
thresholds where recall is at least `target_recall`, precision is computed
at the threshold where recall is closest to `target_recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
precision at `target_recall`. `update_op` increments the counts of true
positives, false positives, true negatives, and false negatives with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about precision and recall, see
http://en.wikipedia.org/wiki/Precision_and_recall
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
target_recall: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
recall.
metrics_collections: An optional list of collections to which `precision`
should be added.
updates_collections: An optional list of collections to which `update_op`
should be added.
name: An optional variable_scope name.
Returns:
precision: A scalar `Tensor` representing the precision at the given
`target_recall` value.
update_op: An operation that increments the variables for tracking the
true positives, false positives, true negatives, and false negatives and
whose value matches `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`target_recall` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_recall is not '
'supported when eager execution is enabled.')
if target_recall < 0 or target_recall > 1:
raise ValueError('`target_recall` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'precision_at_recall',
(predictions, labels, weights)):
kepsilon = 1e-7 # Used to avoid division by zero.
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
def compute_precision_at_recall(tp, fp, fn, name):
"""Computes the precision at a given recall.
Args:
tp: True positives.
fp: False positives.
fn: False negatives.
name: A name for the operation.
Returns:
The precision at the desired recall.
"""
recalls = math_ops.div(tp, tp + fn + kepsilon)
# Because recall is monotone decreasing as a function of the threshold,
# the smallest recall exceeding target_recall occurs at the largest
# threshold where recall >= target_recall.
admissible_recalls = math_ops.cast(
math_ops.greater_equal(recalls, target_recall), dtypes.int64)
tf_index = math_ops.reduce_sum(admissible_recalls) - 1
# Now we have the threshold at which to compute precision:
return math_ops.div(tp[tf_index] + kepsilon,
tp[tf_index] + fp[tf_index] + kepsilon,
name)
precision_value = compute_precision_at_recall(
values['tp'], values['fp'], values['fn'], 'value')
update_op = compute_precision_at_recall(
update_ops['tp'], update_ops['fp'], update_ops['fn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, precision_value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return precision_value, update_op
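# Example usage of `precision_at_recall` (an illustrative sketch, not executed
# here; it assumes TF 1.x graph mode and the
# `tf.contrib.metrics.precision_at_recall` export path):
#
#   labels = tf.constant([0, 0, 1, 1])
#   predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
#   precision, update_op = tf.contrib.metrics.precision_at_recall(
#       labels, predictions, target_recall=0.5)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     sess.run(update_op)
#     print(sess.run(precision))  # Precision at the largest threshold whose
#                                 # recall is still >= 0.5.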
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on the formula. `weights` are
  applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the running average precision. This value is ultimately
  returned as `average_precision_at_<k>`: an idempotent operation that simply
  divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
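# Illustrative sketch (hypothetical `logits` of shape [batch, num_classes] and
# int64 `label_ids` of shape [batch, num_labels]): run the update op once per
# batch, then read the value tensor for MAP@5 over everything seen so far.
#   map5, map5_update = streaming_sparse_average_precision_at_k(
#       predictions=logits, labels=label_ids, k=5)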
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the running average precision. This value is ultimately
  returned as `average_precision_at_<k>`: an idempotent operation that simply
  divides `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `average_precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.mean_absolute_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
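# Migration sketch for the deprecation note above; the only difference is the
# argument order (labels come first in core tf.metrics):
#   mae, update_mae = streaming_mean_absolute_error(predictions, labels)  # old
#   mae, update_mae = metrics.mean_absolute_error(labels, predictions)    # new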
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
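# Illustrative sketch: one common (but not required) choice of `normalizer` is
# the label magnitude plus a small constant to avoid division by zero:
#   rel_err, update_rel = streaming_mean_relative_error(
#       predictions, labels, normalizer=math_ops.abs(labels) + 1e-6)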
@deprecated(None,
'Please switch to tf.metrics.mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None,
'Please switch to tf.metrics.root_mean_squared_error. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
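  As a small numeric check, with batch A = {(1, 2), (3, 4)} and batch B =
  {(5, 6)}: C_A = (1-2)(2-3) + (3-2)(4-3) = 2 and C_B = 0, so the combination
  gives C_AB = 2 + 0 + (2-5)(3-6) * 2 * 1 / 3 = 8, which equals the comoment of
  the pooled three points; the unbiased covariance is then 8 / (3 - 1) = 4.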
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
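# Illustrative sketch: the pair behaves like other streaming metrics; each run
# of the update op jointly refreshes the covariance and both variances, and
# the value tensor reads the current correlation:
#   r, r_update = streaming_pearson_correlation(predictions, labels)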
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
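# Numeric sketch of the IOU formula above: for two classes with confusion
# matrix [[3, 1], [2, 4]] (rows = truth, columns = prediction),
# IOU_0 = 3 / (3 + 2 + 1) = 0.5 and IOU_1 = 4 / (4 + 1 + 2) = 4/7, so
# mean_iou = (0.5 + 4/7) / 2, roughly 0.536.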
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
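# For instance, _next_array_size(5) with the default growth factor evaluates
# ceil(log(5) / log(1.5)) = 4 and returns ceil(1.5 ** 4) = 6.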
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
      raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
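# Illustrative sketch (hypothetical `batch_vals`, a [None, d] float tensor fed
# one batch per step): after running the update op on every batch, the value
# tensor holds the full concatenation along axis 0.
#   all_vals, concat_update = streaming_concat(batch_vals, axis=0)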
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
  metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
      'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
          predictions, labels, weights),
      'Mean Relative Error': slim.metrics.streaming_mean_relative_error(
          predictions, labels, labels, weights),
      'RMSE Linear': slim.metrics.streaming_root_mean_squared_error(
          predictions, labels, weights),
      'RMSE Log': slim.metrics.streaming_root_mean_squared_error(
          predictions, labels, weights),
  })
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
This metric keeps track of the denominator in `tf.metrics.mean`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
'execution is enabled.')
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values = math_ops.to_float(values)
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count_, num_values)
count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access
if updates_collections:
ops.add_to_collections(updates_collections, update_count_op)
return count_, update_count_op
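# Illustrative sketch: pairing `count` with `metrics.mean` to report both a
# subset mean and the (weighted) number of examples behind it; `subset_mask`
# is a hypothetical 0/1 weights tensor:
#   m, m_update = metrics.mean(values, weights=subset_mask)
#   n, n_update = count(values, weights=subset_mask)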
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
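  For example, for two classes with confusion matrix [[2, 1], [1, 2]] (N = 6):
  sum(po) = 4, sum(pe) = (3 * 3 + 3 * 3) / 6 = 3, and k = (4 - 3) / (6 - 3),
  i.e. 1/3.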
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
  does not support a weighted confusion matrix yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
metrics_impl._safe_div( # pylint: disable=protected-access
pe_row * pe_col, total, None))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
| apache-2.0 |
GeosoftInc/gxpy | setup.py | 1 | 4120 | # coding=utf-8
import json
import sys
import shutil
from os import path, remove, environ
from glob import glob
from setuptools import setup
with open('geosoft/pkg_info.json') as fp:
_info = json.load(fp)
def read(fname):
return open(path.join(path.dirname(__file__), fname)).read()
version_tag = "{}{}".format(_info['version'], _info['pre-release'])
if _info['pre-release'] == '':
dev_status_classifier = "Development Status :: 5 - Production/Stable"
else:
dev_status_classifier = "Development Status :: 4 - Beta"
for f in glob("geosoft/*.pyd"):
try:
remove(f)
except PermissionError as e:
raise Exception("An application is using a file we need to change: \n {}".format(str(e)))
dependencies = ['numpy', 'pandas', 'requests']
if 'bdist_wheel' in sys.argv:
# Have to specify python-tag to specify which module
for arg in sys.argv:
if arg.startswith('--python-tag='):
pythontag = arg[13:]
if pythontag == "cp36":
shutil.copyfile('gxapi_cy.cp36-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp36-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif pythontag == "cp37":
shutil.copyfile('gxapi_cy.cp37-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp37-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif pythontag == "cp38":
shutil.copyfile('gxapi_cy.cp38-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp38-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif pythontag == "cp39":
shutil.copyfile('gxapi_cy.cp39-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp39-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
break
else:
# Copy the version we are building for
py_ver_major_minor = sys.version_info[:2]
if py_ver_major_minor == (3, 6):
shutil.copyfile('gxapi_cy.cp36-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp36-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif py_ver_major_minor == (3, 7):
shutil.copyfile('gxapi_cy.cp37-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp37-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif py_ver_major_minor == (3, 8):
shutil.copyfile('gxapi_cy.cp38-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp38-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
elif py_ver_major_minor == (3, 9):
shutil.copyfile('gxapi_cy.cp39-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy.pyd')
shutil.copyfile('gxapi_cy_extend.cp39-win_amd64.pyd', 'geosoft/gxapi/gxapi_cy_extend.pyd')
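# Typical invocations exercising the branches above (assuming the matching
# cp3x .pyd files sit next to setup.py):
#   python setup.py bdist_wheel --python-tag=cp38
#   python setup.py install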
packages=[
'geosoft',
'geosoft.gxapi',
'geosoft.gxpy',
'geosoft.gxpy._jdcal',
'geosoft.gxpy._xmltodict',
'geosoft.gxpy.user_input'
]
package_data={
'geosoft': ['*.json'],
'geosoft.gxapi': ['gxapi_cy.pyd', 'gxapi_cy_extend.pyd', '*.dll'],
'geosoft.gxpy._jdcal': ['*.txt', '*.rst'],
'geosoft.gxpy._xmltodict': ['LICENSE', '*.md'],
'geosoft.gxpy.user_input': ['*.gx']
}
setup(
name='geosoft',
version=version_tag,
description='Geosoft GX API module for Python',
long_description=read('README.md'),
author='Geosoft Inc.',
author_email='[email protected]',
platforms=["win_amd64"],
url='https://github.com/GeosoftInc/gxpy',
license='BSD',
install_requires=dependencies,
packages=packages,
package_data=package_data,
test_suite="geosoft.gxpy.tests",
classifiers=[
dev_status_classifier,
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3 :: Only",
],
)
| bsd-2-clause |
JeanKossaifi/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
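# aside (a sketch): the fitted model can also score log-densities, e.g.
# log_dens = kde.score_samples(data), which is handy for sanity checks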
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
cle1109/scot | scot/eegtopo/topoplot.py | 4 | 7357 | # Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013 Martin Billinger
from __future__ import division
import numpy as np
from scipy.interpolate import interp1d
from .projections import (array_project_radial_to3d,
array_project_radial_to2d)
from .geo_euclidean import Vector
from scipy.spatial import ConvexHull
class Topoplot(object):
""" Creates 2D scalp maps. """
def __init__(self, m=4, num_lterms=10, headcolor=[0, 0, 0, 1], clipping='head', electrodescale=1, interpolationrange=np.pi * 3 / 4, head_radius=np.pi * 3 / 4):
import matplotlib.path as path
self.interprange = interpolationrange
self.head_radius = head_radius
self.nose_angle = 15
self.nose_length = 0.12
self.headcolor = headcolor
self.clipping = clipping
self.electrodescale = np.asarray(electrodescale)
verts = np.array([
(1, 0),
(1, 0.5535714285714286), (0.5535714285714286, 1), (0, 1),
(-0.5535714285714286, 1), (-1, 0.5535714285714286), (-1, 0),
(-1, -0.5535714285714286), (-0.5535714285714286, -1), (0, -1),
(0.5535714285714286, -1), (1, -0.5535714285714286), (1, 0),
]) * self.head_radius
codes = [path.Path.MOVETO,
path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
]
self.path_head = path.Path(verts, codes)
x = self.head_radius * np.cos((90.0 - self.nose_angle / 2) * np.pi / 180.0)
y = self.head_radius * np.sin((90.0 - self.nose_angle / 2) * np.pi / 180.0)
verts = np.array([(x, y), (0, self.head_radius * (1 + self.nose_length)), (-x, y)])
codes = [path.Path.MOVETO, path.Path.LINETO, path.Path.LINETO]
self.path_nose = path.Path(verts, codes)
self.legendre_factors = self.calc_legendre_factors(m, num_lterms)
self.channel_fence = None
self.locations = None
self.g = None
self.z = None
self.c = None
self.image = None
self.g_map = {}
@staticmethod
def calc_legendre_factors(m, num_lterms):
return [0] + [(2 * n + 1) / (n ** m * (n + 1) ** m * 4 * np.pi) for n in range(1, num_lterms + 1)]
def calc_g(self, x):
return np.polynomial.legendre.legval(x, self.legendre_factors)
def set_locations(self, locations):
n = len(locations)
g = np.zeros((1 + n, 1 + n))
g[:, 0] = np.ones(1 + n)
g[-1, :] = np.ones(1 + n)
g[-1, 0] = 0
for i in range(n):
for j in range(n):
g[i, j + 1] = self.calc_g(np.dot(locations[i], locations[j]))
self.channel_fence = None
self.locations = locations
self.g = g
def set_values(self, z):
self.z = z
self.c = np.linalg.solve(self.g, np.concatenate((z, [0])))
def get_map(self):
return self.image
def set_map(self, img):
self.image = img
def calc_gmap(self, pixels):
try:
return self.g_map[pixels]
except KeyError:
pass
x = np.linspace(-self.interprange, self.interprange, pixels)
y = np.linspace(self.interprange, -self.interprange, pixels)
xy = np.transpose(np.meshgrid(x, y)) / self.electrodescale
e = array_project_radial_to3d(xy)
gmap = self.calc_g(e.dot(np.transpose(self.locations)))
self.g_map[pixels] = gmap
return gmap
def create_map(self, pixels=32):
gm = self.calc_gmap(pixels)
self.image = gm.dot(self.c[1:]) + self.c[0]
def plot_map(self, axes=None, crange=None, offset=(0,0)):
if axes is None:
import matplotlib.pyplot as plot
axes = plot.gca()
        if isinstance(crange, str):
            if crange.lower() == 'channels':
                crange = None
            elif crange.lower() in ['full', 'map']:
                # use the full range of the interpolated map
                vru = np.nanmax(np.abs(self.image))
                vrl = -vru
                crange = (vrl, vru)
        if crange is None:
            vru = np.nanmax(np.abs(self.z))
            vrl = -vru
        else:
            vrl, vru = crange
head = self.path_head.deepcopy()
head.vertices += offset
if self.clipping == 'head':
clip_path = (head, axes.transData)
elif self.clipping == 'electrodes':
import matplotlib.path as path
verts = self._get_fence() + offset
codes = [path.Path.LINETO] * (len(verts) - 1)
codes.insert(0, path.Path.MOVETO)
clip_path = (path.Path(verts, codes), axes.transData)
else:
raise ValueError('unknown clipping mode: ', self.clipping)
return axes.imshow(self.image, vmin=vrl, vmax=vru, clip_path=clip_path,
extent=(offset[0]-self.interprange, offset[0]+self.interprange,
offset[1]-self.interprange, offset[1]+self.interprange))
def plot_locations(self, axes=None, offset=(0,0), fmt='k.', alpha=0.5):
if axes is None:
import matplotlib.pyplot as plot
axes = plot.gca()
p2 = array_project_radial_to2d(self.locations) * self.electrodescale + offset
axes.plot(p2[:, 0], p2[:, 1], fmt, alpha=alpha, markersize=2)
def plot_head(self, axes=None, offset=(0,0)):
import matplotlib.patches as patches
if axes is None:
import matplotlib.pyplot as plot
axes = plot.gca()
head = self.path_head.deepcopy()
nose = self.path_nose.deepcopy()
head.vertices += offset
nose.vertices += offset
axes.add_patch(patches.PathPatch(head, facecolor='none', edgecolor=self.headcolor))
axes.add_patch(patches.PathPatch(nose, facecolor='none', edgecolor=self.headcolor))
def plot_circles(self, radius, axes=None, offset=(0,0)):
import matplotlib.pyplot as plot
if axes is None: axes = plot.gca()
mx = np.max(np.abs(self.z))
col = interp1d([-mx, 0, mx], [[0, 1, 1], [0, 1, 0], [1, 1, 0]])
for i in range(len(self.locations)):
p3 = self.locations[i]
p2 = array_project_radial_to2d(p3) * self.electrodescale + offset
circ = plot.Circle((p2[0, 0], p2[0, 1]), radius=radius, color=col(self.z[i]))
axes.add_patch(circ)
def _get_fence(self):
if self.channel_fence is None:
points = array_project_radial_to2d(self.locations) * self.electrodescale
hull = ConvexHull(points)
self.channel_fence = points[hull.vertices]
return self.channel_fence
def topoplot(values, locations, axes=None, offset=(0, 0), plot_locations=True,
plot_head=True, **kwargs):
"""Wrapper function for :class:`Topoplot.
"""
topo = Topoplot(**kwargs)
topo.set_locations(locations)
topo.set_values(values)
topo.create_map()
topo.plot_map(axes=axes, offset=offset)
if plot_locations:
topo.plot_locations(axes=axes, offset=offset)
if plot_head:
topo.plot_head(axes=axes, offset=offset)
return topo
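# Illustrative usage sketch (assumes the default Topoplot() construction and
# roughly unit-length 3D electrode positions, as expected by the radial
# projection helpers used above; the coordinates and values are made up):
#
#     locations = np.array([[0.0, 0.0, 1.0],
#                           [0.0, 0.707, 0.707],
#                           [0.707, 0.0, 0.707],
#                           [-0.707, 0.0, 0.707]])
#     values = np.array([0.5, -0.2, 0.8, 0.1])
#     topo = topoplot(values, locations)  # interpolated map, electrodes, head outline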
| mit |
simon-pepin/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
    quantile : float, default 0.3
        Should be in the interval [0, 1]; 0.5 means that the median of all
        pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
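# Illustrative sketch of the heuristic above (the data and numbers are made up
# for the example; with these four points and quantile=0.5, each point's
# nearest-neighbour distance is sqrt(0.5), so the averaged estimate is ~0.71):
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 1.], [1.5, 1.5], [5., 5.], [5.5, 5.5]])
#     >>> bw = estimate_bandwidth(X, quantile=0.5)
#     >>> round(bw, 2)
#     0.71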
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
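# Illustrative sketch of the functional interface above, on two well separated
# blobs (toy data made up for the example):
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 1.], [1.1, 1.1], [0.9, 1.0],
#     ...               [8., 8.], [8.1, 8.2], [7.9, 8.0]])
#     >>> centers, labels = mean_shift(X, bandwidth=2.0)
#     >>> len(centers), len(labels)
#     (2, 6)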
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
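# Illustrative sketch of the binning above: with bin_size=1 the five points
# near (1, 1) and (2, 1) collapse onto two grid nodes, plus one bin at the
# origin (toy data made up for the example):
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
#     ...               [2., 1.], [2.1, 1.1], [0., 0.]])
#     >>> len(get_bin_seeds(X, bin_size=1, min_bin_freq=1))
#     3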
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
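# Illustrative sketch of the estimator API above (toy data made up for the
# example; the ordering of the discovered clusters is not guaranteed, so only
# shapes are checked):
#
#     >>> import numpy as np
#     >>> X = np.array([[1., 1.], [1.1, 1.1], [0.9, 1.0],
#     ...               [8., 8.], [8.1, 8.2], [7.9, 8.0]])
#     >>> ms = MeanShift(bandwidth=2.0).fit(X)
#     >>> ms.cluster_centers_.shape
#     (2, 2)
#     >>> ms.predict([[0.8, 1.2], [8.05, 8.05]]).shape
#     (2,)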
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/indexes/interval/test_interval.py | 1 | 47165 | from __future__ import division
import pytest
import numpy as np
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, date_range, timedelta_range)
from pandas.compat import lzip
from pandas.core.common import _asarray_tuplesafe
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
@pytest.mark.parametrize('data', [
Index([0, 1, 2, 3, 4]),
Index(list('abcde')),
date_range('2017-01-01', periods=5),
date_range('2017-01-01', periods=5, tz='US/Eastern'),
timedelta_range('1 day', periods=5)])
def test_constructors(self, data, closed, name):
left, right = data[:-1], data[1:]
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
# validate expected
assert expected.closed == closed
assert expected.name == name
assert expected.dtype.subtype == data.dtype
tm.assert_index_equal(expected.left, data[:-1])
tm.assert_index_equal(expected.right, data[1:])
# validated constructors
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(data, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left, right, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = (r'IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex([Interval(0, 1), Interval(2, 3, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
@pytest.mark.parametrize('tz_left, tz_right', [
(None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
def test_constructors_errors_tz(self, tz_left, tz_right):
# GH 18537
left = date_range('2017-01-01', periods=4, tz=tz_left)
right = date_range('2017-01-02', periods=4, tz=tz_right)
# don't need to check IntervalIndex(...) or from_intervals, since
# mixed tz are disallowed at the Interval level
with pytest.raises(ValueError):
IntervalIndex.from_arrays(left, right)
with pytest.raises(ValueError):
IntervalIndex.from_tuples(lzip(left, right))
with pytest.raises(ValueError):
breaks = left.tolist() + [right[-1]]
IntervalIndex.from_breaks(breaks)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
@pytest.mark.parametrize('breaks', [
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(['20170101', '20170202', '20170303', '20170404']),
pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])])
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
list('abcdefgh'),
lzip(range(10), range(1, 11)),
[['A', 'B'], ['a', 'b'], ['c', 'd'], ['e', 'f']],
[Interval(0, 1), Interval(1, 2), Interval(3, 4), Interval(4, 5)]])
def test_length_errors(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks)
msg = 'IntervalIndex contains Intervals without defined length'
with tm.assert_raises_regex(TypeError, msg):
index.length
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
@pytest.mark.parametrize('dtype', [
np.int64, np.float64, 'period[M]', 'timedelta64', 'datetime64[ns]',
'datetime64[ns, US/Eastern]'])
def test_astype_errors(self, closed, dtype):
idx = self.create_index(closed=closed)
msg = 'Cannot cast IntervalIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
# TODO: check this behavior is consistent with test_interval_new.py
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
        index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
    def test_contains_method(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_datetime(self, tz):
start = Timestamp('2000-01-01', tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp('2000-01-01T12:00', tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp('2000-01-01', tz=tz) not in index
assert Timestamp('2000-01-01T12', tz=tz) not in index
assert Timestamp('2000-01-02', tz=tz) not in index
iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
Timestamp('2000-01-01T18', tz=tz))
iv_false = Interval(Timestamp('1999-12-31', tz=tz),
Timestamp('2000-01-01', tz=tz))
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp('2000-01-01', tz=tz))
assert index.contains(Timestamp('2000-01-01T12', tz=tz))
assert index.contains(Timestamp('2000-01-02', tz=tz))
assert index.contains(iv_true)
assert not index.contains(iv_false)
# test get_indexer
start = Timestamp('1999-12-31T12:00', tz=tz)
target = date_range(start=start, periods=7, freq='12H')
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp('2000-01-08T18:00', tz=tz)
target = date_range(start=start, periods=7, freq='6H')
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)),
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)),
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10))])
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(_asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)) + [np.nan],
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)) + [np.nan],
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10)) + [np.nan]])
@pytest.mark.parametrize('na_tuple', [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(_asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
| bsd-3-clause |
dmitriz/zipline | tests/pipeline/test_frameload.py | 2 | 7621 | """
Tests for zipline.pipeline.loaders.frame.DataFrameLoader.
"""
from unittest import TestCase
from mock import patch
from numpy import arange, ones
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import (
ADD,
DataFrameLoader,
MULTIPLY,
OVERWRITE,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = ones((len(self.dates), len(self.sids)), dtype=bool)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array(
[USEquityPricing.open], self.dates, self.sids, self.mask
)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.dates,
self.sids,
self.mask,
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(USEquityPricing.close, baseline)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.dates[dates_slice],
self.sids[sids_slice],
self.mask[dates_slice, sids_slice],
).values()
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
expected_formatted_adjustments = {
6: [
Float64Multiply(
first_row=0,
last_row=5,
first_col=0,
last_col=0,
value=0.5,
),
Float64Add(
first_row=0,
last_row=5,
first_col=1,
last_col=1,
value=1.0,
),
],
7: [
Float64Add(
first_row=5,
last_row=6,
first_col=1,
last_col=1,
value=1.0,
),
],
8: [
Float64Overwrite(
first_row=6,
last_row=7,
first_col=2,
last_col=2,
value=99.0,
)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask[dates_slice, sids_slice]
with patch('zipline.pipeline.loaders.frame.adjusted_array') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
dates=self.dates[dates_slice],
assets=self.sids[sids_slice],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
| apache-2.0 |
RTsGIT/LocalSentic | CreateGraph.py | 1 | 1550 | #! /usr/bin/env python
import networkx as nx
import GetConcepts
import nltk
import matplotlib.pyplot as plt
import pickle
class CreateGraph():
def __init__(self):
self.ConceptList = GetConcepts.GetConcepts()
def GraphCreate(self, Concepts):
if( Concepts ):
ConceptsToGraph = Concepts
else:
ConceptsToGraph = self.ConceptList
NodeGraph = nx.DiGraph()
for word in ConceptsToGraph:
nodes = nltk.word_tokenize(word)
if ( NodeGraph.has_edge( "root", nodes[0] )):
for i in range(len(nodes) - 1):
NodeGraph.add_edge(nodes[i], nodes[i+1])
else:
NodeGraph.add_edge("root", nodes[0])
for i in range(len(nodes) - 1):
NodeGraph.add_edge(nodes[i], nodes[i+1])
return NodeGraph
def CheckGraph(self, Graph, concepts):
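# Return the bigrams (pairs of consecutive tokens) from the query
# concepts that already exist as edges in the knowledge-base graph.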
if(concepts):
ToCheck = concepts
else:
print "Sorry ! The knowledge base does not contain any concept for your query"
ConceptsToBeSearched = []
for i in range(len(ToCheck)):
tokens = nltk.word_tokenize(ToCheck[i])
length = len(tokens)
print length
if ( length > 1):
for j in range(len(tokens) - 1):
if( Graph.has_edge(tokens[j], tokens[j+1])):
print "Match Found in Database"
s = tokens[j]+" "+tokens[j + 1]
ConceptsToBeSearched.append(s)
return ConceptsToBeSearched
'''
f = open('Concepts.txt','r')
concepts = f.readlines()
f.close()
for i in range(len(concepts)):
concepts[i] = concepts[i].replace("\n","")
G = CreateGraph()
graph = G.GraphCreate(concepts)
nx.write_gpickle(graph,"test.gpickle")
''' | gpl-2.0 |
fbagirov/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
sozos/RAS | source/experiments.py | 1 | 11487 | from data import *
from schedule import *
from Interval import *
from time import time
from collections import *
from matplotlib.pyplot import *
dates = []
for i in xrange(1, 32):
day = str(i)
if i < 10:
day = '0' + day
dates.append('2014-01-' + day)
folder = '../graph/'
min_flights = 100
def q2b():
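# For every airport/date pair with at least min_flights flights, build the
# flight intervals, assign them to gates, and record
# (number of flights, number of gates needed) for the scatter plot.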
print 'Q2B'
print 'How many gates are needed?'
print 'Is there any relationship between the number of flights and the number of gates needed?'
flights_to_gates = []
for date in dates:
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
intervals = [Interval(flight_info) for flight_info in flight_infos]
gates = assign(intervals)
flights_to_gates.append((len(intervals), len(gates)))
# with open(folder + '2b.dat', 'w') as f:
# f.write('# Number of flights\tNumber of gates needed\n')
# print '# Number of flights', '\t', 'Number of gates needed'
# for flights, gates in flights_to_gates:
# print flights, '\t', gates
# f.write('{}\t{}\n'.format(flights, gates))
xs = 'Number of flights'
ys = 'Number of gates needed'
xlabel(xs)
ylabel(ys)
scatter(*zip(*flights_to_gates))
axis('tight')
savefig(folder + '2b.png', bbox_inches='tight')
cla()
def q3a():
print 'Q3A'
print 'How much random delay is needed before we see a collision?'
slack_to_delays = []
p = 0.948 # avg probability to be delayed from our data
for date in dates:
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
for i in xrange(0, 1000):
delay_flight_infos(flight_infos, p, -i, i)
delayed_intervals = [Interval(flight_info) for flight_info in flight_infos]
gates = assign(delayed_intervals)
slack = min([min(gate.slacks()) for gate in gates])
num_collisions = sum([gate.collisions() for gate in gates])
if num_collisions > 0:
break
try:
assert slack <= 2*i
except:
print 'error', slack, i
slack_to_delays.append((slack, i))
# with open(folder + '3ai.dat', 'w') as f:
# f.write('# Slack (mins)\tMagnitude of delay (mins)\n')
# print '# Slack (mins)\tMagnitude of delay (mins)'
# for slack, delays in slack_to_delays:
# print slack, '\t', delays
# f.write('{}\t{}\n'.format(slack, delays))
xs = 'Slack (mins)'
ys = 'Magnitude of random delay (mins)'
xlabel(xs)
ylabel(ys)
scatter(*zip(*slack_to_delays))
axis('tight')
xlim(-1, 16)
savefig(folder + '3ai.png', bbox_inches='tight')
cla()
quit()
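# NOTE: quit() above terminates the script here, so the collision and
# real-delay experiments below in q3a never execute as written.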
print 'For a given level of random delay, how many gate collisions do we see?'
slack_to_collisions = []
delay = 24 # average delay from our data
for date in dates:
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
delay_flight_infos(flight_infos, 0.95, -delay, delay)
delayed_intervals = [Interval(flight_info) for flight_info in flight_infos]
gates = assign(delayed_intervals)
slack = min([min(gate.slacks()) for gate in gates])
if slack > 50:
continue
num_collisions = sum([gate.collisions() for gate in gates])
slack_to_collisions.append((slack, num_collisions))
# with open(folder + '3aii.dat', 'w') as f:
# header = '# Slack (mins)\tNumber of gate collisions'
# print header
# f.write(header + '\n')
# for slack, collisions in slack_to_collisions:
# print slack, '\t', collisions
# f.write('{}\t{}\n'.format(slack, collisions))
xs = 'Slack (mins)'
ys = 'Number of gate collisions'
xlabel(xs)
ylabel(ys)
scatter(*zip(*slack_to_collisions))
axis('tight')
savefig(folder + '3aii.png', bbox_inches='tight')
cla()
print 'Now, use the actual delays from the real dataset. How many gate collisions do we see?'
delays_to_collisions = []
for date in dates:
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
gates = assign(intervals(flight_infos))
num_collisions = sum([gate.collisions() for gate in gates])
delays_to_collisions.append((avg_delay(flight_infos), num_collisions))
# with open(folder + '3aiii.dat', 'w') as f:
# print '# Magnitude of mean delay (mins)\tNumber of gate collisions'
# f.write('# Magnitude of mean delay (mins)\tNumber of gate collisions\n')
# for delays, collisions in delays_to_collisions:
# print delays, '\t', collisions
# f.write('{}\t{}\n'.format(delays, collisions))
xs = 'Magnitude of mean delay (mins)'
ys = 'Number of gate collisions'
xlabel(xs)
ylabel(ys)
scatter(*zip(*delays_to_collisions))
axis('tight')
savefig(folder + '3aiii.png', bbox_inches='tight')
cla()
def q3b():
print 'Q3B'
delays_to_reassignment_optimal_gates = []
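# Each element is (mean delay, (reassignment, optimal_gates)): the
# reassignment of the original gate plan once delays are known, and the
# gate assignment recomputed directly on the delayed start/end times.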
for date in dates:
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
intervals = [Interval(flight_info) for flight_info in flight_infos]
gates = assign(intervals)
optimal_gates = assign(intervals, 0, delayed_start_time, delayed_end_time)
reassignment = reassign(gates, intervals)
delays_to_reassignment_optimal_gates.append((avg_delay(flight_infos), (reassignment, optimal_gates)))
print 'For the delays in the dataset, how many gates do we need?'
# with open(folder + '3bi.dat', 'w') as f:
# header = '# Magnitude of mean delay (mins)\tNumber of gates\tNumber of optimal gates\tDifference'
# print header
# f.write(header + '\n')
# for delays, reassignment_optimal_gates in delays_to_reassignment_optimal_gates:
# reassignment = reassignment_optimal_gates[0]
# len_gates = len(reassignment[0]) + len(reassignment[1])
# len_optimal_gates = len(reassignment_optimal_gates[1])
# difference = len_gates - len_optimal_gates
# print delays, '\t', len_gates, '\t', len_optimal_gates, '\t', difference
# f.write('{}\t{}\t{}\t{}\n'.format(delays, len_gates, len_optimal_gates, difference))
xs = 'Magnitude of mean delay (mins)'
ys = 'Number of gates'
xlabel(xs)
ylabel(ys)
x = []
y1 = []
y2 = []
y3 = []
for delays, reassignment_optimal_gates in delays_to_reassignment_optimal_gates:
reassignment = reassignment_optimal_gates[0]
len_gates = len(reassignment[0]) + len(reassignment[1])
len_optimal_gates = len(reassignment_optimal_gates[1])
difference = len_gates - len_optimal_gates
x.append(delays)
y1.append(len_gates)
y2.append(len_optimal_gates)
y3.append(difference)
scatter(x, y1, color='green', label='Our algorithm')
scatter(x, y2, color='blue', label='Optimal')
scatter(x, y3, color='red', label='Difference')
legend()
axis('tight')
savefig(folder + '3bi.png', bbox_inches='tight')
cla()
print 'How does the number of extra gates needed scale with the delays?'
# with open(folder + '3bii.dat', 'w') as f:
# header = '# Magnitude of mean delay (mins)\tNumber of extra gates\tNumber of optimal extra gates\tDifference'
# print header
# f.write(header + '\n')
# for delays, reassignment_optimal_gates in delays_to_reassignment_optimal_gates:
# reassignment = reassignment_optimal_gates[0]
# len_extra_gates = len(reassignment[1])
# len_optimal_extra_gates = len(reassignment_optimal_gates[1]) - len(reassignment[0])
# difference = len_extra_gates - len_optimal_extra_gates
# print delays, '\t', len_extra_gates, '\t', len_optimal_extra_gates, '\t', difference
# f.write('{}\t{}\t{}\t{}\n'.format(delays, len_extra_gates, len_optimal_extra_gates, difference))
xs = 'Magnitude of mean delay (mins)'
ys = 'Number of extra gates'
xlabel(xs)
ylabel(ys)
x = []
y1 = []
y2 = []
y3 = []
for delays, reassignment_optimal_gates in delays_to_reassignment_optimal_gates:
reassignment = reassignment_optimal_gates[0]
len_extra_gates = len(reassignment[1])
len_optimal_extra_gates = len(reassignment_optimal_gates[1]) - len(reassignment[0])
difference = len_extra_gates - len_optimal_extra_gates
x.append(delays)
y1.append(len_extra_gates)
y2.append(len_optimal_extra_gates)
y3.append(difference)
scatter(x, y1, color='green', label='Our algorithm')
scatter(x, y2, color='blue', label='Optimal')
scatter(x, y3, color='red', label='Difference')
legend()
axis('tight')
savefig(folder + '3bii.png', bbox_inches='tight')
cla()
def q4():
print 'Q4'
extra_gates_to_reassignment = []
for date in dates:
print date
airport_to_flight_infos = get_airports_from_date('test_data.csv', date, min_flights)
for airport, flight_infos in airport_to_flight_infos.items():
intervals = [Interval(flight_info) for flight_info in flight_infos]
original_gates = assign(intervals)
for i in xrange(0, 101):
num_extra_gates = int((float(i)/100) * len(intervals))
gates = assign(intervals, len(original_gates) + num_extra_gates)
reassignment = reassign(gates, intervals)
# [number of gates, number of overflow gates, number of reassignments]
reassignment = [len(reassignment[0]), len(reassignment[1]), reassignment[2]]
extra_gates_to_reassignment.append((i, reassignment))
print 'Show, via experiment, that this minimizes the number of gate reassignments.'
# with open(folder + '4i.dat', 'w') as f:
# header = '# Percentage of extra gates\tNumber of reassignment\tNumber of overflow gates'
# f.write(header + '\n')
# # print header
# for extra_gates, reassignment in extra_gates_to_reassignment:
# # print extra_gates, '\t', reassignment[2], '\t', len(reassignment[1])
# f.write('{}\t{}\t{}\n'.format(extra_gates, reassignment[2], reassignment[1]))
# xs = 'Percentage of extra gates'
# ys = 'Number of reassignment'
# xlabel(xs)
# ylabel(ys)
# x = []
# y = []
# for extra_gates, reassignment in extra_gates_to_reassignment:
# x.append(extra_gates)
# y.append(reassignment[2])
# scatter(x, y)
# axis('tight')
# savefig(folder + '4i.png', bbox_inches='tight')
# cla()
print 'Prove that your assignment can handle a certain level of delay.'
# with open(folder + '4ii.dat', 'w') as f:
# header = '# Percentage of extra gates\tPercentage of success'
# f.write(header + '\n')
# # print header
# extra_gates_to_num_success = defaultdict(int)
# extra_gates_to_num_total = defaultdict(int)
# for extra_gates, reassignment in extra_gates_to_reassignment:
# if reassignment[1] == 0:
# extra_gates_to_num_success[extra_gates] += 1
# extra_gates_to_num_total[extra_gates] += 1
# for extra_gates, num_success in extra_gates_to_num_success.items():
# percentage_success = float(num_success)/extra_gates_to_num_total[extra_gates]
# # print extra_gates, '\t', percentage_success
# f.write('{}\t{}\n'.format(extra_gates, percentage_success))
xs = 'Percentage of extra gates'
ys = 'Percentage of success'
xlabel(xs)
ylabel(ys)
x = []
y = []
extra_gates_to_num_success = defaultdict(int)
extra_gates_to_num_total = defaultdict(int)
for extra_gates, reassignment in extra_gates_to_reassignment:
if reassignment[1] == 0:
extra_gates_to_num_success[extra_gates] += 1
extra_gates_to_num_total[extra_gates] += 1
for extra_gates, num_success in extra_gates_to_num_success.items():
percentage_success = float(num_success)/extra_gates_to_num_total[extra_gates]
x.append(extra_gates)
y.append(percentage_success)
scatter(x, y)
axis('tight')
ylim(-0.05, 1.05)
savefig(folder + '4ii.png', bbox_inches='tight')
cla()
# q2b()
q3a()
# q3b()
# q4() | mit |
aravart/speech-games | theory/simulate.py | 1 | 5827 | import heapq
import itertools
import numpy
import pandas
import tqdm
import random
verbose = False
class Node:
def __init__(self, x):
self.x = x
self.edges = []
self.accept = []
self.parents = []
def __repr__(self):
return str(self.x)
def construct(k=3, d=3, m=3, undirected=False):
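# Build the search graph: nodes are all tuples of length 0..d over an
# alphabet of size k; each node shorter than d gets edges to the nodes
# obtained by inserting 1..m extra symbols (capped so results stay
# within length d). Parents are tracked, and edges are mirrored when
# undirected=True.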
v = []
for i in range(d+1):
v += list(itertools.product(range(k), repeat=i))
v = dict(zip(v, map(Node, v)))
for node in v.values():
if len(node.x) < d:
for j in range(min(m, d-len(node.x))):
for s in succ(node.x, k, j+1):
node.edges.append(v[s])
v[s].parents.append(node)
if undirected:
v[s].edges.append(node)
# if undirected:
# for node in v.values():
# for child in node.edges:
# child.edges.append(node)
return v
def succ(x, k, j=1):
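# Return all distinct tuples obtained by inserting a block of j symbols
# (over an alphabet of size k) at any position in x.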
res = [x[:i] + c + x[i:]
for c in itertools.product(range(k), repeat=j)
for i in range(len(x)+1)]
res.sort()
res = list(s for s, _ in itertools.groupby(res))
return res
def dijkstra(vs, edges, source, target):
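# Uniform-cost search from source to target using the supplied edge
# function; returns (found, prev, explored), where prev maps each node
# to its predecessor and explored counts nodes popped from the heap.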
h = []
prev = {}
dist = {}
q = set([source])
heapq.heappush(h, (0, source))
dist[source] = 0
explored = 0
while q:
cost, u = heapq.heappop(h)
explored += 1
if verbose:
print cost, u
q.remove(u)
if u == target:
return True, prev, explored
for e in edges(u):
alt = cost + 1
if e not in dist or alt < dist[e]:
q.add(e)
heapq.heappush(h, (alt, e))
dist[e] = alt
prev[e] = u
return False, prev, explored
def drop(vs, accept=0.5):
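# Randomly mark each outgoing edge of every node as kept with
# probability `accept`; stochasticedges() then filters on these flags.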
for v in vs:
v.accept = [random.uniform(0, 1) < accept for _ in range(len(v.edges))]
def stochasticedges(n):
return map(lambda x: x[1], filter(lambda x: x[0], zip(n.accept, n.edges)))
def path(prev, source, target):
path = [target]
while path[0] != source:
path.insert(0, prev[path[0]])
return path
def simulate(n=100, k=3, d=3, m=3, undirected=False, progress=False):
"""Simulates search
Args:
n: The number of samples per probability
k: The size of the alphabet
d: The size of the largest node in the graph (depth)
m: Edges are up to m blocks larger than their source
"""
v = construct(k, d, m, undirected)
source = v[()]
targets = filter(lambda x: len(x.x) == d, v.values())
res = []
for i in tqdm.trange(11):
for _ in tqdm.trange(n):
p = i / 10.0
drop(v.values(), accept=p)
target = targets[random.randint(0, len(targets)-1)]
found, prev, explored = dijkstra(v.values(),
stochasticedges,
source,
target)
l = None
if found:
l = len(path(prev, source, target))-1
res.append((p, found, l, explored, target))
if verbose:
print p, found, l, explored, target
df = pandas.DataFrame(res, columns=('p', 'found', 'length', 'explored', 'target'))
return df
def simulate_small(target, n=100, k=3, d=3, m=3, p=0.5, undirected=False, progress=False):
"""Simulates search for a given target and p
Args:
n: The number of samples per probability
k: The size of the alphabet
d: The size of the largest node in the graph (depth)
m: Edges are up to m blocks larger than their source
"""
v = construct(k, d, m, undirected)
source = v[()]
target = v[target]
res = []
for _ in tqdm.trange(n):
drop(v.values(), accept=p)
found, prev, explored = dijkstra(v.values(),
stochasticedges,
source,
target)
l = None
if found:
l = len(path(prev, source, target))-1
res.append((p, found, l, explored, target))
if verbose:
print p, found, l, explored, target
df = pandas.DataFrame(res, columns=('p', 'found', 'length', 'explored', 'target'))
return df
def plot_1(df, show=True):
import matplotlib.pyplot as plt
df.groupby(['p']).mean()['found'].plot()
plt.title('Proportion of targets found')
plt.xlabel('Probability of retaining edge')
if show:
plt.show()
def plot_2(df, show=True):
import matplotlib.pyplot as plt
df[df['found']].groupby(['p']).mean()['length'].plot(kind='bar')
plt.title('Mean path length if found')
plt.xlabel('Probability of retaining edge')
if show:
plt.show()
def plot_3(df, show=True):
import matplotlib.pyplot as plt
df.groupby(['p']).mean()['explored'].plot(kind='bar')
plt.title('Mean nodes expanded')
plt.xlabel('Probability of retaining edge')
if show:
plt.show()
def dfs(n, res=[]):
"""Topological sort"""
for child in reversed(n.edges):
dfs(child, res)
res.append(n)
return res
def count(v):
return (len(v.values()), sum(map(lambda x: len(x.edges), v.values())))
def ancestors(leaf):
current = [leaf]
visited = []
while current:
c = current.pop()
visited.append(c)
for p in c.parents:
if p not in visited and p not in current:
current.append(p)
return visited
def prune(v, s):
for n in v.values():
if n not in s:
del v[n.x]
else:
for e in list(n.edges):
if e not in s:
n.edges.remove(e)
| mit |
JackIron/Q_Learning_Games | First_Q_Learning_Game/Taxi.py | 1 | 1330 | import gym
#import taxi env
from gym.envs.toy_text import taxi
#import tabular q agent
import tabular_q_agent_taxi
#library to generate plots
import matplotlib.pyplot as plt
#lib to remove files
import os
env=taxi.TaxiEnv()#make TaxiEnv
agent=tabular_q_agent_taxi.TabularQAgent(env.observation_space,env.action_space)
print("BEGIN THE Q-LEARNING")
for i in range(50):
agent.learn(env) #learn best choices and act on the env
print("trial number: ",i)
print("Make the Rewards Plot")
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.figure(figsize=(10, 5))
f=open("rewards.txt","r")
stringa=f.readline()
n=0
while stringa!="":#count the number of rewards
n+=1
stringa=f.readline()
newRewards=[ 0 for i in range(n)]
f=open("rewards.txt","r")
stringa=f.readline()
n=0
while stringa!="":#make the rewards list
newRewards[n]=stringa
n+=1
stringa=f.readline()
f.close()
#eps list with numRewards slots
eps=range(0,50)
plt.plot(eps,newRewards)
plt.title("Rewards collected over the time")
plt.xlabel("Trials")
plt.ylabel("Rewards")
plt.grid()#put the grid
plt.show()#display the plot; the figure window also allows saving it to disk
os.remove("/home/giacomo/Scrivania/Q_Learning_Games/First_Q_Learning_Game/rewards.txt")#to remove the file
| mit |
MechCoder/scikit-learn | benchmarks/bench_glmnet.py | 111 | 3890 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
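# Fit the estimator produced by `factory` on (X, Y), print the wall-clock
# fit time, the test-set RMSE, and the mean absolute difference between
# the estimated and generating coefficients; return the elapsed time.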
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
arter97/android_kernel_nvidia_shieldtablet | scripts/tracing/dma-api/plotting.py | 96 | 4043 | """Ugly graph drawing tools"""
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
#import numpy as np
from matplotlib import cbook
# http://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
def __init__(self, artists, tolerance=5, offsets=(-20, 20),
template='x: %0.2f\ny: %0.2f', display_all=False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"template" is the format string to be used. Note: For compatibility
with older versions of python, this uses the old-style (%)
formatting specification.
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self.template = template
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
xytext=self.offsets, textcoords='offset points', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
try:
annotation = self.annotations[event.artist.axes]
except KeyError:
return
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
annotation.xy = x, y
annotation.set_text(self.template % (x, y))
annotation.set_visible(True)
event.canvas.draw()
def plotseries(*serieslabels):
"""Plot lists of series in separate axes, tie time axis together"""
global fig
fig, axes = plt.subplots(nrows=len(serieslabels), sharex=True)
for subplot, ax in zip(serieslabels, axes):
for ser, lab in zip(*subplot): # subplot = ([x], [y])
ax.step(ser[0], ser[1], label=lab, where="post")
ax.grid(True)
ax.legend()
(DataCursor(ax.lines))
plt.grid(True)
plt.show()
def disp_pic(bitmap):
"""Display the allocation bitmap. TODO."""
fig=plt.figure()
a=fig.add_subplot(1,1,1)
fig.clf()
implt=plt.imshow(bitmap, extent=(0, len(bitmap[0]), 0, len(bitmap)),
interpolation="nearest", cmap=cmap.gist_heat)
fig.canvas.draw()
plt.show()
| gpl-2.0 |
North-Guard/BigToolsComplicatedData | Week2/exercise_2_optimized.py | 1 | 1113 | from time import time
import matplotlib.pyplot as plt
import numpy as np
plt.close("all")
start = time()
# Load the data
with open('Week2/pizza-train.json', 'r') as file:
lines = [line.split() for line in file.readlines()]
# Get request_text's
lines = [line[1:] for line in lines if line[0] == '"request_text":']
n_lines = len(lines)
# Find all distinct words in vocabulary
words = set()
idx = 0
for line in lines:
text = line
words.update(text)
# Make a word-list and mapping to indices
words = sorted(list(words))
word2idx = {word: idx for idx, word in enumerate(words)}
n_words = len(words)
# Bag-of-words list of lists
bow = [[0] * n_words for _ in range(n_lines)]
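# Binary bag-of-words: entry [i][j] is 1 if word j occurs in request i
# (presence only, not counts).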
for line_nr, line in enumerate(lines):
text = line
bow_row = bow[line_nr]
for word in text:
ix = word2idx[word]
bow_row[ix] = 1
end = time()
# Sanity check that we did it right
im = np.array(bow)
plt.imshow(im)
plt.show()
# Most popular word
s = sum(im)
max_ix = np.where(s == max(s))
print('Most popular word: ')
print(words[max_ix[0][0]])
print("Script time: {:.2f}s".format(end - start))
| mit |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
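# Minimal usage sketch (hypothetical names `fig`, `ax`, and a mappable `im`
# such as the return value of ax.imshow); not part of the original module:
# cax, kw = make_axes(ax) # shrink `ax` and create a colorbar axes
# cb = Colorbar(cax, im, **kw) # draw the colorbar for the mappable
# cb.set_label('intensity') # label the long axis of the colorbar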
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
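# Slices into the boundary/value arrays that select only the "inside"
# entries, skipping the extra boundary added at either end when the
# colorbar is extended ('min', 'max', or 'both'); slice(0, 1000000) is
# effectively "take everything".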
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self._inside = self._slice_dict[extend]
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
self.solids = None
self.lines = None
self.dividers = None
self.set_label('')
if cbook.iterable(ticks):
self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
else:
self.locator = ticks # Handle default in _ticker()
if format is None:
if isinstance(self.norm, colors.LogNorm):
self.formatter = ticker.LogFormatter()
else:
self.formatter = ticker.ScalarFormatter()
elif cbook.is_string_like(format):
self.formatter = ticker.FormatStrFormatter(format)
else:
self.formatter = format # Assume it is a Formatter
# The rest is in a method so we can recalculate when clim changes.
self.draw_all()
def draw_all(self):
'''
Calculate any free parameters based on the current cmap and norm,
and do all the drawing.
'''
self._process_values()
self._find_range()
X, Y = self._mesh()
C = self._values[:,np.newaxis]
self._config_axes(X, Y)
if self.filled:
self._add_solids(X, Y, C)
self._set_label()
def _config_axes(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = patches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_yticks([])
ax.xaxis.set_label_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
def set_label(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
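# Indices into the flattened, transposed mesh that walk the corner
# points of the colorbar outline (including the pointed ends when
# extended) and close the polygon by returning to the start.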
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha}
# Save, set, and restore hold state to keep pcolor from
# clearing the axes. Ordinarily this will not be needed,
# since the axes object should already have hold set.
_hold = self.ax.ishold()
self.ax.hold(True)
col = self.ax.pcolor(*args, **kw)
self.ax.hold(_hold)
#self.add_observer(col) # We should observe, not be observed...
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
)
self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
if len(y) != N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _ticker(self):
'''
Return two sequences: ticks (colorbar data locations)
and ticklabels (strings).
'''
locator = self.locator
formatter = self.formatter
if locator is None:
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator()
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b, nbins=10)
if isinstance(self.norm, colors.NoNorm):
intv = self._values[0], self._values[-1]
else:
intv = self.vmin, self.vmax
locator.create_dummy_axis()
formatter.create_dummy_axis()
locator.set_view_interval(*intv)
locator.set_data_interval(*intv)
formatter.set_view_interval(*intv)
formatter.set_data_interval(*intv)
b = np.array(locator())
b, ticks = self._locate(b)
formatter.set_locs(b)
ticklabels = [formatter(t, i) for i, t in enumerate(b)]
offset_string = formatter.get_offset()
return ticks, ticklabels, offset_string
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
if self.extend in ('both', 'min'):
v[0] = -1
if self.extend in ('both', 'max'):
v[-1] = self.cmap.N
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = list(self.norm.boundaries)
if self.extend in ('both', 'min'):
b = [b[0]-1] + b
if self.extend in ('both', 'max'):
b = b + [b[-1] + 1]
b = np.array(b)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v[self._inside] = 0.5*(bi[:-1] + bi[1:])
if self.extend in ('both', 'min'):
v[0] = b[0] - 1
if self.extend in ('both', 'max'):
v[-1] = b[-1] + 1
self._boundaries = b
self._values = v
return
else:
if not self.norm.scaled():
self.norm.vmin = 0
self.norm.vmax = 1
b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
if self.extend in ('both', 'min'):
b[0] = b[0] - 1
if self.extend in ('both', 'max'):
b[-1] = b[-1] + 1
self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus ends if required.
'''
if self.extend == 'neither':
y = np.linspace(0, 1, N)
else:
if self.extend == 'both':
y = np.zeros(N + 2, 'd')
y[0] = -0.05
y[-1] = 1.05
elif self.extend == 'min':
y = np.zeros(N + 1, 'd')
y[0] = -0.05
else:
y = np.zeros(N + 1, 'd')
y[-1] = 1.05
y[self._inside] = np.linspace(0, 1, N)
return y
def _proportional_y(self):
'''
Return colorbar data coordinates for the boundaries of
a proportional colorbar.
'''
if isinstance(self.norm, colors.BoundaryNorm):
b = self._boundaries[self._inside]
y = (self._boundaries - self._boundaries[0])
y = y / (self._boundaries[-1] - self._boundaries[0])
else:
y = self.norm(self._boundaries.copy())
if self.extend in ('both', 'min'):
y[0] = -0.05
if self.extend in ('both', 'max'):
y[-1] = 1.05
yi = y[self._inside]
norm = colors.Normalize(yi[0], yi[-1])
y[self._inside] = norm(yi)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([0.0, 1.0])
if self.spacing == 'uniform':
y = self._uniform_y(self._central_N())
else:
y = self._proportional_y()
self._y = y
X, Y = np.meshgrid(x,y)
if self.extend in ('min', 'both'):
X[0,:] = 0.5
if self.extend in ('max', 'both'):
X[-1,:] = 0.5
return X, Y
def _locate(self, x):
'''
Given a possible set of color data values, return the ones
within range, together with their corresponding colorbar
data coordinates.
'''
if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
b = self._boundaries
xn = x
xout = x
else:
# Do calculations using normalized coordinates so
# as to make the interpolation more accurate.
b = self.norm(self._boundaries, clip=False).filled()
# We do our own clipping so that we can allow a tiny
# bit of slop in the end point ticks to allow for
# floating point errors.
xn = self.norm(x, clip=False).filled()
in_cond = (xn > -0.001) & (xn < 1.001)
xn = np.compress(in_cond, xn)
xout = np.compress(in_cond, x)
# The rest is linear interpolation with clipping.
y = self._y
N = len(b)
ii = np.minimum(np.searchsorted(b, xn), N-1)
i0 = np.maximum(ii - 1, 0)
#db = b[ii] - b[i0]
db = np.take(b, ii) - np.take(b, i0)
db = np.where(i0==ii, 1.0, db)
#dy = y[ii] - y[i0]
dy = np.take(y, ii) - np.take(y, i0)
z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
return xout, z
def set_alpha(self, alpha):
self.alpha = alpha
class Colorbar(ColorbarBase):
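# Colorbar tied to a specific mappable (image, ContourSet, ...): it pulls
# cmap, norm, and alpha from the mappable and, for unfilled contour sets,
# draws the matching contour lines on the colorbar.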
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
'''
Manually change any contour line colors. This is called
when the image or contour plot to which this colorbar belongs
is changed.
'''
# We are using an ugly brute-force method: clearing and
# redrawing the whole thing. The problem is that if any
# properties have been changed by methods other than the
# colorbar methods, those changes will be lost.
self.ax.cla()
self.draw_all()
#if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
# self.ax.cla()
# self.draw_all()
if isinstance(self.mappable, contour.ContourSet):
CS = self.mappable
if not CS.filled:
self.add_lines(CS)
#if self.lines is not None:
# tcolors = [c[0] for c in CS.tcolors]
# self.lines.set_color(tcolors)
#Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
#Fixme: Some refactoring may be needed; we should not
# be recalculating everything if there was a simple alpha
# change.
def make_axes(parent, **kw):
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
| gpl-3.0 |
adamgreenhall/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
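# Optional inspection (not part of the original example): ARDRegression also
# estimates the noise precision (``alpha_``) and one precision per weight
# (``lambda_``), which can be compared with the values used to simulate the
# data above.
print("Estimated noise precision alpha_: %.2f" % clf.alpha_)
print("Median estimated weight precision lambda_: %.2f" % np.median(clf.lambda_))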
| bsd-3-clause |
russel1237/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
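# Optional summary (not part of the original example): fraction of points
# that received consistent labels from the two algorithms.
print("Fraction of identically labelled points: %.3f" % np.mean(identic))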
| bsd-3-clause |
mattilyra/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
mortada/scipy | scipy/stats/_discrete_distns.py | 15 | 20781 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
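# Illustrative note (comment only, added for this excerpt): the pmf defined
# above matches the closed form, e.g. binom.pmf(3, n=10, p=0.5) is
# approximately comb(10, 3) * 0.5**10 ~= 0.117.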
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. For example, to obtain the cumulative distribution
    function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
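# Illustrative note (comment only, added for this excerpt): the pmf above
# sums to ~1 over a wide support, e.g. poisson.pmf(np.arange(200), mu=10).sum()
# is close to 1.0, and _cdf/_sf defer to special.pdtr/special.pdtrc, so
# cdf(k, mu) + sf(k, mu) == 1 for integer k.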
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
    Note the difference from numpy's ``random_integers``, which
    returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
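# Illustrative note (comment only, added for this excerpt): consistent with
# _rvs and _stats above, the difference of two independent Poisson variates
# with means mu1 and mu2 follows skellam(mu1, mu2), with mean mu1 - mu2 and
# variance mu1 + mu2.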
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
bikong2/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/cluster/__init__.py | 12 | 1331 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, Ward, WardAgglomeration,
AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'Ward',
'WardAgglomeration',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
yask123/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
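# Usage sketch (comment only, added for this excerpt): for the path graph
# 0-1-2 the unnormalized Laplacian is D - A, i.e.
#     graph = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
#     graph_laplacian(graph)
#     # -> [[ 1, -1,  0], [-1,  2, -1], [ 0, -1,  1]]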
| bsd-3-clause |
jseabold/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
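# Note (added for this excerpt): these tests are collected automatically by
# the project's test runner; they can also be invoked directly, e.g. with
# ``pytest sklearn/covariance/tests/test_covariance.py`` (assuming a runner
# compatible with this scikit-learn version).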
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
victorbergelin/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
lpsinger/astropy | astropy/modeling/functional_models.py | 2 | 90578 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated
from .core import (Fittable1DModel, Fittable2DModel)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D',
'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D',
'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale',
'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D', 'KingProjectedAnalytic1D',
'Exponential1D', 'Logarithmic1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian")
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian")
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mean': inputs_unit[self.inputs[0]],
'stddev': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
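# Illustrative note (comment only, added for this excerpt): evaluating the
# model at its mean returns the amplitude, e.g.
#     Gaussian1D(amplitude=2.0, mean=1.0, stddev=0.5)(1.0)  # -> 2.0
# and the ``fwhm`` property above equals stddev * 2 * sqrt(2 * ln 2).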
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
Rotation angle (value in radians). The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
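Examples
--------
A minimal usage sketch; the covariance values below are purely illustrative.
Constructing from a covariance matrix sets ``x_stddev``, ``y_stddev`` and ``theta``:
>>> import numpy as np
>>> from astropy.modeling.models import Gaussian2D
>>> cov = np.array([[2.0, 0.3], [0.3, 0.5]])
>>> g = Gaussian2D(amplitude=1., x_mean=0., y_mean=0., cov_matrix=cov)
>>> value = g(0.5, -0.2)  # evaluate at a single (x, y) position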
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian")
y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian")
x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)")
y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)")
theta = Parameter(default=0.0, description="Rotation angle [in radians] (Optional parameter)")
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
# Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit,
self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_mean': inputs_unit[self.inputs[0]],
'y_mean': inputs_unit[self.inputs[0]],
'x_stddev': inputs_unit[self.inputs[0]],
'y_stddev': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
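Examples
--------
A short usage sketch with illustrative values:
>>> from astropy.modeling.models import Shift
>>> sh = Shift(offset=2.)
>>> float(sh(3.))
5.0
>>> float(sh.inverse(5.))  # the inverse subtracts the offset
3.0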
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'offset': outputs_unit[self.outputs[0]]}
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
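Examples
--------
A short usage sketch with an illustrative, dimensionless factor:
>>> from astropy.modeling.models import Scale
>>> sc = Scale(factor=2.)
>>> float(sc(3.))
6.0
>>> float(sc.inverse(6.))  # the inverse divides by the factor
3.0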
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
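Examples
--------
A short usage sketch with an illustrative redshift:
>>> from astropy.modeling.models import RedshiftScaleFactor
>>> rs = RedshiftScaleFactor(z=0.5)
>>> float(rs(1000.))  # rest-frame value scaled by (1 + z)
1500.0
>>> inv = rs.inverse  # maps observed values back to the rest frame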
"""
z = Parameter(description='Redshift', default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box)
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=9')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError as err:
raise ImportError('Sersic1D model requires scipy.') from err
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_eff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': inputs_unit[self.inputs[0]] ** -1,
'amplitude': outputs_unit[self.outputs[0]]}
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
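Examples
--------
A short usage sketch with illustrative values, including the analytic inverse:
>>> from astropy.modeling.models import Linear1D
>>> line = Linear1D(slope=2., intercept=1.)
>>> float(line(3.))
7.0
>>> inv = line.inverse  # a Linear1D with slope 0.5 and intercept -0.5
>>> float(inv(7.))
3.0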
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit[self.outputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
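Examples
--------
A short usage sketch with illustrative slopes and intercept:
>>> from astropy.modeling.models import Planar2D
>>> plane = Planar2D(slope_x=1., slope_y=2., intercept=3.)
>>> float(plane(2., 1.))  # 1*2 + 2*1 + 3
7.0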
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit['z'],
'slope_x': outputs_unit['z'] / inputs_unit['x'],
'slope_y': outputs_unit['z'] / inputs_unit['y']}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
method : str, optional
Algorithm for computing the complex error function; one of
'Humlicek2' (default; fast, with relative accuracy generally better than 3e-5) or
'Scipy' (alias 'wofz'; requires ``scipy``, almost as fast and serves as the
reference for accuracy).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided
consistently with compatible units or as unitless numbers.
The Voigt profile is calculated as the real part of the complex error function, computed either from
Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module), or from
`~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0,
description="Position of the peak")
amplitude_L = Parameter(default=1, # noqa: N815
description="The Lorentzian amplitude")
fwhm_L = Parameter(default=2/np.pi, # noqa: N815
description="The Lorentzian full width at half maximum")
fwhm_G = Parameter(default=np.log(2), # noqa: N815
description="The Gaussian full width at half maximum")
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803
fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803
**kwargs):
if str(method).lower() in ('wofz', 'scipy'):
try:
from scipy.special import wofz
except (ValueError, ImportError) as err:
raise ImportError(f'Voigt1D method {method} requires scipy: {err}.') from err
self._faddeeva = wofz
elif str(method).lower() == 'humlicek2':
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.')
self.method = self._faddeeva.__name__
super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`."""
if (z.shape == self._last_z.shape and
np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""Derivative of the one dimensional Voigt function with respect to parameters."""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm_L': inputs_unit[self.inputs[0]],
'fwhm_G': inputs_unit[self.inputs[0]],
'amplitude_L': outputs_unit[self.outputs[0]]}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z) for z = x + iy combining Humlicek's rational approximations:
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j])  # 1j/sqrt(pi) to the 12th digit
bb = np.array([+7918.06640624997, 0.0,
-126689.0625, 0.0,
+295607.8125, 0.0,
-236486.25, 0.0,
+84459.375, 0.0,
-15015.0, 0.0,
+1365.0, 0.0,
-60.0, 0.0,
+1.0])
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ +
bb[4])*ZZ + bb[2])*ZZ + bb[0]
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, angle=theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse")
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(default=0, description="The rotation angle in radians of the semimajor axis (Positive - counterclockwise)")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'a': inputs_unit[self.inputs[0]],
'b': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
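Examples
--------
A short usage sketch with illustrative values:
>>> from astropy.modeling.models import Disk2D
>>> disk = Disk2D(amplitude=10., x_0=0., y_0=0., R_0=2.)
>>> float(disk(1., 1.)), float(disk(3., 0.))  # inside, then outside the disk
(10.0, 0.0)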
"""
amplitude = Parameter(default=1, description="Value of disk function")
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer radius of the ring. Can be specified instead of ``width``.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
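Examples
--------
A short usage sketch; ``r_out`` may be given instead of ``width`` (illustrative values):
>>> from astropy.modeling.models import Ring2D
>>> ring = Ring2D(amplitude=1., x_0=0., y_0=0., r_in=2., r_out=4.)  # width = 2
>>> float(ring(3., 0.)), float(ring(1., 0.))  # on the ring, then inside the hole
(1.0, 0.0)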
"""
amplitude = Parameter(default=1, description="Value of the disk function")
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_in': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A")
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
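Examples
--------
A short usage sketch with illustrative widths:
>>> from astropy.modeling.models import Box2D
>>> box = Box2D(amplitude=5., x_0=0., y_0=0., x_width=2., y_width=4.)
>>> float(box(0.5, 1.5)), float(box(1.5, 0.))  # inside, then outside in x
(5.0, 0.0)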
"""
amplitude = Parameter(default=1, description="Amplitude")
x_0 = Parameter(default=0, description="X position of the center of the box function")
y_0 = Parameter(default=0, description="Y position of the center of the box function")
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[1]],
'x_width': inputs_unit[self.inputs[0]],
'y_width': inputs_unit[self.inputs[1]],
'amplitude': outputs_unit[self.outputs[0]]}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
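Examples
--------
A short usage sketch with illustrative values (the profile falls from ``amplitude`` to zero over a radial distance of ``amplitude / slope``):
>>> from astropy.modeling.models import TrapezoidDisk2D
>>> trap = TrapezoidDisk2D(amplitude=2., x_0=0., y_0=0., R_0=1., slope=1.)
>>> float(trap(0., 0.)), float(trap(2., 0.)), float(trap(4., 0.))
(2.0, 1.0, 0.0)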
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
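Examples
--------
A short usage sketch with illustrative values:
>>> from astropy.modeling.models import RickerWavelet2D
>>> w = RickerWavelet2D(amplitude=1., x_0=0., y_0=0., sigma=1.)
>>> float(w(0., 0.))  # the peak value equals the amplitude
1.0
>>> trough = w(2., 0.)  # negative in the surrounding trough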
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
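Examples
--------
A short usage sketch (requires ``scipy``; illustrative values):
>>> from astropy.modeling.models import AiryDisk2D
>>> airy = AiryDisk2D(amplitude=1., x_0=0., y_0=0., radius=1.)
>>> float(airy(0., 0.))  # the central peak equals the amplitude
1.0
>>> ring_value = airy(1., 0.)  # at the first zero, essentially 0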
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Airy function")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(default=1,
description="The radius of the Airy disk (radius of first zero crossing)")
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except ImportError as err:
raise ImportError('AiryDisk2D model requires scipy.') from err
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'radius': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
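Examples
--------
A short usage sketch with illustrative values; for ``alpha = 1``, ``gamma`` equals the half width at half maximum:
>>> from astropy.modeling.models import Moffat2D
>>> m = Moffat2D(amplitude=3., x_0=0., y_0=0., gamma=1., alpha=1.)
>>> float(m(0., 0.)), float(m(1., 0.))  # peak, then half the peak
(3.0, 1.5)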
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model")
y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model")
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
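# Illustrative usage sketch (not part of the original astropy module): shows how
# the Moffat2D model above could be evaluated on a pixel grid and how its FWHM
# property relates to gamma and alpha. The helper name, grid size and parameter
# values are assumptions made purely for this example.
def _moffat2d_usage_sketch():
    y, x = np.mgrid[0:64, 0:64]
    psf = Moffat2D(amplitude=1.0, x_0=32.0, y_0=32.0, gamma=3.0, alpha=2.5)
    image = psf(x, y)  # evaluate the profile over the grid
    # fwhm == 2 * |gamma| * sqrt(2**(1/alpha) - 1), per the property defined above
    return image, psf.fwhm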
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(default=0, description="Rotation angle in radians (counterclockwise-positive)")
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except (ValueError, ImportError):
raise ImportError('Sersic2D model requires scipy.')
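        # gammaincinv(2n, 0.5) returns b_n such that the regularized lower
        # incomplete gamma function P(2n, b_n) = 0.5, i.e. Gamma(2n) = 2 gamma(2n, b_n),
        # which is the defining relation for b_n stated in the class docstring.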
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_eff': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
    It usually works for models with a concentration parameter (c = log10(r_t/r_c)) < 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label='c ~ {:0.2f}'.format(mod.concentration))
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] http://articles.adsabs.harvard.edu/pdf/1962AJ.....67..471K
"""
amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor")
r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius")
r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius")
@property
def concentration(self):
"""Concentration parameter of the king model"""
return np.log10(np.abs(self.r_tide/self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) -
r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \
(1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \
2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2
d_r_tide = (2 * amplitude * r_core ** 2 * r_tide *
(1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds]*0
d_r_core[bounds] = d_r_core[bounds]*0
d_r_tide[bounds] = d_r_tide[bounds]*0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_core': inputs_unit[self.inputs[0]],
'r_tide': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
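# Illustrative usage sketch (not part of the original astropy module): evaluates
# the projected King profile above on a radial grid and inspects its
# concentration parameter. The helper name and the chosen radii/parameters are
# assumptions made purely for this example.
def _king_projected_usage_sketch():
    king = KingProjectedAnalytic1D(amplitude=1.0, r_core=1.0, r_tide=10.0)
    r = np.linspace(0.0, 12.0, 25)
    sigma = king(r)          # surface density; zero for r >= r_tide by construction
    c = king.concentration   # log10(r_tide / r_core) = 1.0 here
    return sigma, c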
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
    amplitude : float, optional
        Amplitude (scale factor) of the model.
    tau : float, optional
        Scale of the logarithm's argument; must be non-zero.
See Also
--------
Exponential1D, Gaussian1D
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
    amplitude : float, optional
        Amplitude (scale factor) of the model.
    tau : float, optional
        E-folding scale of the exponential; must be non-zero.
See Also
--------
Logarithmic1D, Gaussian1D
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
''' Derivative with respect to parameters'''
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
''' tau cannot be 0'''
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
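# Illustrative sketch (not part of the original astropy module): the Logarithmic1D
# and Exponential1D models above are defined as inverses of one another, with the
# roles of amplitude and tau swapped. The helper name and sample points are
# assumptions made purely for this example.
def _exponential_inverse_sketch():
    model = Exponential1D(amplitude=2.0, tau=3.0)   # y = 2 * exp(x / 3)
    inverse = model.inverse                         # Logarithmic1D with amplitude=3, tau=2
    x = np.linspace(0.5, 4.0, 8)
    return np.allclose(inverse(model(x)), x)        # the round trip recovers x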
@deprecated('4.0', alternative='RickerWavelet1D')
class MexicanHat1D(RickerWavelet1D):
""" Deprecated."""
@deprecated('4.0', alternative='RickerWavelet2D')
class MexicanHat2D(RickerWavelet2D):
""" Deprecated."""
| bsd-3-clause |
ngannguyen/immunoseq | lib/overlapLib.py | 1 | 51055 | #nknguyen soe ucsc edu
#Wed Jul 18 16:56:23 PDT 2012
import os, sys, re, copy, time
import random as rnd
from immunoseq.lib.immunoseqLib import *
from sonLib.bioio import system
import matplotlib.pyplot as pyplot
from matplotlib.ticker import *
from matplotlib.font_manager import FontProperties
from scipy.stats import ttest_ind
import numpy as np
###########DRAW PAIRWISE OVERLAP STATS ###################
#######HACK TO COLOR B27+ PAIRS RED #######
def drawPairOverlapData_hack(axes, group2avr, group2std, group2pairVec):
markersize = 12.0
offset = 0.03
xlabels = []
group2properLabel = {'controls':'Control - Control', 'controls-patients':'Control - Patient', 'patients':'Patient - Patient'}
#numpoints = len( group2avr.values()[0] )
#xoffset = offset*(numpoints-1)/2
miny = float('inf')
maxy = 0
lines = []
linenames = []
for index, group in enumerate( sorted(group2avr.keys()) ):
ydata = group2avr[group]
stddata = group2std[group]
upperydata = [ y + stddata[yi] for yi, y in enumerate(ydata) ]
lowerydata = [ y - stddata[yi] for yi, y in enumerate(ydata) ]
numpoints = len(ydata)
xoffset = offset*(numpoints-1)/2
i = index + 1 - xoffset
xlabels.append(group)
xdata = [i + offset*k for k in xrange(numpoints)]
#axes.errorbar(xdata, ydata, yerr=stddata, color='#377EB8', markeredgecolor = "#377EB8", markersize=markersize, fmt='o')
#Separate into red and blue:
ydataB27pos = [] #both samples in pair are B27+
stddataB27pos = []
xdataB27pos = []
ydataB27neg = [] #at least one of the sample in the pair is B27-
stddataB27neg = []
xdataB27neg = []
b27posSamples = ['as1D', 'as11D', 'as16D', 'asBD', 'as20D', 'as8D']
pairs = group2pairVec[group]
for pairIndex, pair in enumerate(pairs):
samples = pair.split('-')
if samples[0] in b27posSamples and samples[1] in b27posSamples:
ydataB27pos.append( ydata[pairIndex] )
stddataB27pos.append( stddata[pairIndex] )
#xdataB27pos.append( xdata[pairIndex] )
else:
ydataB27neg.append( ydata[pairIndex] )
stddataB27neg.append( stddata[pairIndex] )
#xdataB27neg.append( xdata[pairIndex] )
if len(ydataB27pos) > 0:
xdataB27pos = xdata[ : len(ydataB27pos)]
#l = axes.errorbar(xdataB27pos, ydataB27pos, yerr=stddataB27pos, color='#E31A1C', markeredgecolor = "#E31A1C", markersize=markersize, fmt='o')
l = axes.errorbar(xdataB27pos, ydataB27pos, yerr=stddataB27pos, color='#353535', markeredgecolor = "#353535", markersize=markersize, fmt='o')
#Average point:
b27posMean = [ np.mean(ydataB27pos) ]
b27posStd = [ np.std(ydataB27pos) ]
#avrl = axes.errorbar( [np.mean(xdataB27pos)], b27posMean, yerr=b27posStd, color='#FE8E8F', markeredgecolor='#FE8E8F', markersize=markersize + 3.0, fmt='^' )
avrl = axes.errorbar( [np.mean(xdataB27pos)], b27posMean, yerr=b27posStd, color='#BDBDBD', markeredgecolor='#BDBDBD', markersize=markersize + 3.0, fmt='^' )
if 'B27+, B27+' not in linenames:
lines.append(l[0])
linenames.append('B27+, B27+')
if 'B27+, B27+ Avr' not in linenames:
lines.append(avrl[0])
linenames.append('B27+, B27+ Avr')
if len(ydataB27neg) > 0:
xdataB27neg = xdata[ len(ydataB27pos): ]
l = axes.errorbar(xdataB27neg, ydataB27neg, yerr=stddataB27neg, color='#848484', markeredgecolor = "#848484", markersize=markersize, fmt='o')
#Average point:
b27negMean = [ np.mean(ydataB27neg) ]
b27negStd = [ np.std(ydataB27neg) ]
avrl = axes.errorbar( [np.mean(xdataB27neg)], b27negMean, yerr=b27negStd, color='#BDBDBD', markeredgecolor='#BDBDBD', markersize=markersize + 3.0, fmt='^' )
if 'B27+/-, B27-' not in linenames:
lines.append(l[0])
linenames.append('B27+/-, B27-')
if 'B27+/-, B27- Avr' not in linenames:
lines.append(avrl[0])
linenames.append('B27+/-, B27- Avr')
#update max, min:
miny = min( [min(lowerydata), miny] )
maxy = max( [max(upperydata), maxy] )
#Average point:
#groupMean = [ np.mean(ydata) ]
#groupStd = [ np.std(ydata) ]
#axes.errorbar( [np.mean(xdata)], groupMean, yerr=groupStd, color='#4DAF4A', markeredgecolor='#4DAF4A', markersize=markersize + 3.0, fmt='^' )
legend = axes.legend(lines, linenames, numpoints=1, loc="best", ncol=1)
    legend.draw_frame(False)
editSpine(axes)
range = maxy - miny
#Vertical lines that separate groups:
for x in xrange(1, len(xlabels)):
axes.plot( [x + 0.5, x + 0.5], [miny - range*0.05, maxy + range*0.05], color="#3F3F3F", linestyle='-', linewidth=0.5)
axes.yaxis.grid(b=True, color="#3F3F3F", linestyle='-', linewidth=0.5)
axes.set_xlim(0.5, len(group2avr) + 0.5)
axes.set_ylim(miny -range*0.05, maxy + range*0.01)
axes.xaxis.set_ticks( xrange(1, len(xlabels) + 1) )
axes.xaxis.set_ticklabels( [ group2properLabel[xlabel] for xlabel in xlabels ] )
for label in axes.get_xticklabels():
label.set_fontsize ('x-large')
label.set_fontweight ('bold')
for label in axes.get_yticklabels():
label.set_fontsize ('large')
label.set_fontweight ('bold')
axes.set_title("Pairwise Overlap", size='xx-large', weight='bold')
axes.set_ylabel("Number of shared clones", size='x-large', weight='bold')
def drawPairOverlap_hack(outfile, group2avrVec, group2stdVec, group2pairVec):
dpi = 300
outformat = 'pdf'
fig, pdf = initImage2(10.0, 10.0, outformat, outfile, dpi)
axes = setAxes(fig)
drawPairOverlapData_hack(axes, group2avrVec, group2stdVec, group2pairVec)
writeImage2(fig, pdf, outformat, outfile, dpi)
########### END HACK ###############
def drawPairOverlapData(axes, group2avr, group2std):
markersize = 10.0
offset = 0.02
xlabels = []
#numpoints = len( group2avr.values()[0] )
#xoffset = offset*(numpoints-1)/2
miny = float('inf')
maxy = 0
for index, group in enumerate( sorted(group2avr.keys()) ):
ydata = group2avr[group]
stddata = group2std[group]
upperydata = [ y + stddata[yi] for yi, y in enumerate(ydata) ]
lowerydata = [ y - stddata[yi] for yi, y in enumerate(ydata) ]
numpoints = len(ydata)
xoffset = offset*(numpoints-1)/2
i = index + 1 - xoffset
xlabels.append(group)
xdata = [i + offset*k for k in xrange(numpoints)]
axes.errorbar(xdata, ydata, yerr=stddata, color='#377EB8', markeredgecolor = "#377EB8", markersize=markersize, fmt='o')
#axes.errorbar(xdata, ydata, yerr=stddata, color='#A6D7FE', markeredgecolor = "#A6D7FE", markersize=markersize, fmt='o')
#axes.plot(xdata, ydata, color='#377EB8', marker='o', markeredgecolor = "#377EB8", markersize=markersize, linestyle='none')
#update max, min:
miny = min( [min(lowerydata), miny] )
maxy = max( [max(upperydata), maxy] )
#Average point:
groupMean = [ np.mean(ydata) ]
groupStd = [ np.std(ydata) ]
#axes.errorbar( [np.mean(xdata)], groupMean, yerr=groupStd, color='#275880', markeredgecolor='#275880', markersize=markersize + 3.0, fmt='^' )
axes.errorbar( [np.mean(xdata)], groupMean, yerr=groupStd, color='#E31A1C', markeredgecolor='#E31A1C', markersize=markersize + 3.0, fmt='^' )
editSpine(axes)
range = maxy - miny
#Vertical lines that separate groups:
for x in xrange(1, len(xlabels)):
axes.plot( [x + 0.5, x + 0.5], [miny - range*0.05, maxy + range*0.05], color="#848484", linestyle='-', linewidth=0.005)
axes.yaxis.grid(b=True, color="#848484", linestyle='-', linewidth=0.005)
axes.set_xlim(0.5, len(group2avr) + 0.5)
axes.set_ylim(miny -range*0.05, maxy + range*0.01)
axes.xaxis.set_ticks( xrange(1, len(xlabels) + 1) )
axes.xaxis.set_ticklabels( xlabels )
for label in axes.get_xticklabels():
label.set_fontsize ('x-large')
axes.set_title("Pairwise Overlap", size='xx-large')
axes.set_ylabel("Number of clones", size='x-large')
def drawPairOverlap(outfile, group2avrVec, group2stdVec):
dpi = 300
outformat = 'pdf'
fig, pdf = initImage2(10.0, 10.0, outformat, outfile, dpi)
axes = setAxes(fig)
drawPairOverlapData(axes, group2avrVec, group2stdVec)
writeImage2(fig, pdf, outformat, outfile, dpi)
###########################################
def printPairwiseOverlap(reads1, reads2, clones1, clones2, stats1, stats2, cutoffs, outfile):
f = open(outfile, "w")
f.write("#%Cutoff\tClones1\tClones2\tOverlap1\tOverlap2\t%1overlap2\t%2overlap1\t%reads1overlap2\t%reads2overlap1\n")
for i,c in enumerate(cutoffs):
oc1 = stats1["oclones"][i]
or1 = stats1["oreads"][i]
t1 = clones1[i]
r1 = reads1[i]
oc2 = stats2["oclones"][i]
or2 = stats2["oreads"][i]
t2 = clones2[i]
r2 = reads2[i]
f.write("%.3f\t%d\t%d\t%d\t%d\t%.2f\t%.2f\t%.2f\t%.2f\n" %( c, t1, t2, oc1, oc2, getPc(oc1, t1), getPc(oc2, t2), getPc(or1, r1), getPc(or2, r2) ))
f.close()
def getPairwiseOverlap( seqs1, seqs2, aa2v2j1, aa2v2j2, cutoffs, mode, discrete ):
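    # mode == 1: a clone is counted as shared at a cutoff only if its frequency
    # passes that cutoff in BOTH samples; any other mode tallies each sample
    # independently. discrete == True bins clones into [cutoff_i, cutoff_{i+1})
    # instead of the cumulative >= cutoff_i counting.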
#Initialize stats:
stats1 = {"oclones":[], "oreads":[]}
stats2 = {"oclones":[], "oreads":[]}
for i in xrange( len(cutoffs) ):
for k in stats1:
stats1[k].append(0)
stats2[k].append(0)
#get number of clones that pass the cutoff:
reads1, clones1, total1 = getNumClones(seqs1, cutoffs, discrete)
reads2, clones2, total2 = getNumClones(seqs2, cutoffs, discrete)
if total1 == 0 or total2 == 0:
        return reads1, reads2, clones1, clones2, stats1, stats2  # all-zero stats, so callers can still unpack the tuple
#get overlap:
for k in seqs1:
s1 = seqs1[k]
#s2 = seqs2[k]
s2 = findSeq(s1, aa2v2j2)
if not s2: #not found in repertoire 2
continue
for i, cutoff in enumerate(cutoffs):
if mode == 1:
if s1.freq >= cutoff and s2.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s1.freq < cutoffs[i+1] and s2.freq < cutoffs[i+1]):
stats1["oclones"][i] += 1
stats1["oreads"][i] += s1.count
stats2["oclones"][i] += 1
stats2["oreads"][i] += s2.count
else:
if s1.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s1.freq < cutoffs[i+1]):
stats1["oclones"][i] += 1
stats1["oreads"][i] += s1.count
if s2.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s2.freq < cutoffs[i+1]):
stats2["oclones"][i] += 1
stats2["oreads"][i] += s2.count
#printPairwiseOverlap(reads1, reads2, clones1, clones2, stats1, stats2, cutoffs, outfile)
return reads1, reads2, clones1, clones2, stats1, stats2
def getNumClones(seqs, cutoffs, discrete):
reads = [ 0 for c in cutoffs ]
clones = [ 0 for c in cutoffs ]
total = sum([ s.count for s in seqs.values() ])
for s in seqs.values():
for i,c in enumerate(cutoffs):
if s.freq >= c:
if not discrete or i == len(cutoffs) -1 or (discrete and s.freq < cutoffs[i + 1]):
clones[i] += 1
reads[i] += s.count
return reads, clones, total
################### PRINT SHARED SEQUENCES ##############################
def printSharedSeqSummary(outdir, name1, name2, seqs1, seqs2):
outfile = os.path.join(outdir, "%s-%s" %(name1, name2))
f = open(outfile, 'w')
f.write("#%s\n"%name1)
for seq in sorted(seqs1, key=lambda s:s.freq, reverse=True):
f.write("%s\t%f\n" %(seq.seq, seq.freq))
f.write("#%s\n"%name2)
for seq in sorted(seqs2, key=lambda s:s.freq, reverse=True):
f.write("%s\t%f\n" %(seq.seq, seq.freq))
f.close()
def printSharedFasta(outdir, name1, name2, seqs):
outdir = os.path.join(outdir, name1)
system("mkdir -p %s" %outdir)
outfile = os.path.join(outdir, "%s_%s.fa" %(name1, name2))
f = open(outfile, 'w')
seqs = sorted(seqs, key=lambda s: s.count, reverse=True)
for i, s in enumerate(seqs):
f.write(">%s_%s;%d|%s|%s;size=%d\n" %(name1, name2, i, ','.join(s.vs), ','.join(s.js), s.count) )#name;id|vs|js;size=###
f.write("%s\n" %s.seq)
f.close()
def printPairwiseOverlapSequences(name1, name2, seqs1, seqs2, aa2v2j1, aa2v2j2, outdir, cutoffs, mode, discrete):
cutoff2seqs1 = {}
cutoff2seqs2 = {}
#Initialize cutoff2seqs:
for c in cutoffs:
cutoff2seqs1[c] = []
cutoff2seqs2[c] = []
for k in seqs1:
s1 = seqs1[k]
if not hasSeq(s1, aa2v2j2):
continue
#s2 = seqs2[k]
s2 = findSeq(s1, aa2v2j2)
for i, cutoff in enumerate(cutoffs):
if mode == 1:
if s1.freq >= cutoff and s2.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s1.freq < cutoffs[i+1] and s2.freq < cutoffs[i+1]):
cutoff2seqs1[cutoff].append(s1)
cutoff2seqs2[cutoff].append(s2)
else:
if s1.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s1.freq < cutoffs[i+1]):
cutoff2seqs1[cutoff].append(s1)
if s2.freq >= cutoff:
if not discrete or i == len(cutoffs) -1 or (discrete and s2.freq < cutoffs[i+1]):
cutoff2seqs2[cutoff].append(s2)
#Print the sequences
for c in cutoffs:
cOutdir = os.path.join(outdir, "%.3f" %c)
system("mkdir -p %s" %cOutdir)
printSharedFasta(cOutdir, name1, name2, cutoff2seqs1[c])
printSharedFasta(cOutdir, name2, name1, cutoff2seqs2[c])
#Print frequencies of shared sequences:
freqdir = os.path.join(outdir, "freq", "%.3f" %c)
system("mkdir -p %s" %freqdir)
printSharedSeqSummary(freqdir, name1, name2, cutoff2seqs1[c], cutoff2seqs2[c])
################### END PRINT SHARED SEQUENCES ##############################
################## PAIRWISE OVERLAP PLOT (ROBINS et al.) #####################
#def fitPower(xdata, ydata):
# #Try to fit a Y=aX^b to the observed data. i.e try to estimate a and b
def sample2colorFixed():
colors = ["#E31A1C", "#FE8E8F", "#377EB8", "#A6D7FE", "#4DAF4A", "#B8FEB5", "#984EA3", "#F6BDFE",
"#FF7F00", "#FEBF80",
"#525252", "#737373", "#969696", "#BDBDBD",
#"#006837", "#31A354", "#78C679", "#C2E699",
"#2B8CBE", "#7BCCC4", "#BAE4BC",
"#225EA8", "#41B6C4", "#A1DAB4",
"#253494",
"#993404", "#D95F0E",
"#6A51A3", "#9E9AC8", "#CBC9E2"
]
s2i = {#'as10R':0, 'as11R': 1, 'as12R': 2, 'as13R': 3, 'as1R': 4, 'as1D': 5, 'as8D': 6, 'as15D': 7,
#'asBR': 8, 'asBD': 9,
'adaptBSD1': 10, 'adaptBSD1cd8n': 11, 'adaptBSD2R': 12, 'adaptBSD3': 13,
'adaptF28cd4': 14, 'adaptF57cd4': 15, 'adaptM35cd4': 16,
'adaptF28cd8': 17, 'adaptF57cd8': 18, 'adaptM35cd8': 19,
'adaptF28': 14, 'adaptF57': 15, 'adaptM35': 16,
'adaptRep': 20,
'wangAll': 21, 'wangCyt': 22,
'warrenM1': 23, 'warrenM2': 24, 'warrenF': 25,
'f28CD4memory': 0, 'f28CD4naive': 1, 'f28CD8memory': 2, 'f28CD8naive': 3,
'f57CD4memory': 4, 'f57CD4naive': 5, 'f57CD8memory': 6, 'f57CD8naive': 7,
'm35CD4memory': 8, 'm35CD4naive': 9, 'm35CD8memory': 10, 'm35CD8naive': 11,
#'as11D':12, 'as16D':13, 'as1Ddraw2':21, 'as1Ddraw2notcd8':23,
#'asBDdraw2':24, 'as20D':25,
'as1D':0, 'as8D':1, 'as11D':2, 'as15D':3, 'as16D':4,
'asBD':5, 'as20D':6,
'adaptAS':7, 'adaptCD':8, 'adaptMA':9, 'adaptBSD1n':11,
'as1Ddraw2':1, 'as1R':2, 'irep1D': 3, 'irep1R':4,
}
s2c = {}
for s, i in s2i.iteritems():
s2c[s] = colors[i]
return s2c
def sample2color(names):
s2cfixed = sample2colorFixed()
sample2color = {}
#colors = iseqlib.getColors0()
colors = getColors0()
for i, name in enumerate(names):
if name in s2cfixed:
sample2color[name] = s2cfixed[name]
else:
sample2color[name] = colors[i]
return sample2color
def drawOverlapPlotData(data, axes, labels, sam2color):
#colors = iseqlib.getColors0()
lines = []
maxy = 0
maxx = 0
for i, d in enumerate(data):
xdata = d[0]
ydata = d[1]
maxy = max([maxy, max(ydata)])
maxx = max([maxx, max(xdata)])
name = labels[i]
#l = axes.plot(xdata, ydata, color=colors[i], linestyle='-', linewidth=3)
l = axes.plot(xdata, ydata, color=sam2color[name], linestyle='-', linewidth=3)
lines.append(l)
#HACK:
#axes.set_xlim(0, 1.5*(10**9))
#axes.set_xlim(0, 3.5*(10**9))
axes.set_ylim(0, max([maxy*0.5, 50]) )
axes.set_xlim(0, maxx*0.5)
legend = pyplot.legend(lines, labels, numpoints=1, loc='best')
#axes.set_xlabel('Product of number of top unique sequences n_i1 x n_i2', size='x-large')
axes.set_title("Number of shared clones over an increasing sampling space")
axes.set_xlabel('n1 x n2', size='x-large')
axes.set_ylabel('Number of shared unique sequences', size='x-large')
def drawOverlapPlot(data, file, labels, options, sam2color):
options.out = file
fig, pdf = initImage(8.0, 10.0, options)
axes = setAxes(fig)
drawOverlapPlotData(data, axes, labels, sam2color)
writeImage(fig, pdf, options)
def addSeq2aa2v2j(seq, aa2v2j):
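    # aa2v2j is a nested lookup: amino-acid sequence -> V gene -> J gene -> Seq
    # object, so that clones are matched on the (sequence, V, J) combination
    # rather than on the amino-acid sequence alone.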
v2j = {}
if seq.seq in aa2v2j:
v2j = aa2v2j[seq.seq]
else:
aa2v2j[seq.seq] = v2j
for v in seq.vs:
j2seq = {}
if v in v2j:
j2seq = v2j[v]
else:
v2j[v] = j2seq
for j in seq.js:
if j not in j2seq:
j2seq[j] = seq
def cmpseq(seq1, seq2):
if seq1.seq == seq2.seq: #same amino acid sequence
for v1 in seq1.vs:
if v1 in seq2.vs:
for j1 in seq1.js:
if j1 in seq2.js: #at least one (v, j) pair of seq1 presents in seq2
return True
return False
#def pairwiseOverlapPlot(seqs1, seqs2, outfile):
def pairwiseOverlapPlot(seqs1, seqs2, isRandom):
#sorting seqs1
seqs1list = [(header, seq) for header, seq in seqs1.iteritems()]
seqs2list = [(header, seq) for header, seq in seqs2.iteritems()]
if not isRandom:
seqs1list = sorted( seqs1list, key=lambda item:item[1].count, reverse=True )
seqs2list = sorted( seqs2list, key=lambda item:item[1].count, reverse=True )
else:
rheaders1 = rnd.sample( seqs1.keys(), len(seqs1.keys()) )
rheaders2 = rnd.sample( seqs2.keys(), len(seqs2.keys()) )
seqs1list = [(header, seqs1[header]) for header in rheaders1]
seqs2list = [(header, seqs2[header]) for header in rheaders2]
top1 = {} #aa2v2j
top2 = {}
xdata = []
ydata = []
currx = 0
curry = 0
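    # Walk down both clone lists in parallel (count-ranked, or shuffled if
    # isRandom). After step i, curry is the number of clones shared between the
    # top i1 clones of sample 1 and the top i2 clones of sample 2, and
    # currx = i1 * i2 is the sampling-space size plotted on the x axis.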
for i in xrange( max([len(seqs1list), len(seqs2list)]) ):
if i >= len(seqs1list):
(h1, s1) = ('', None)
else:
(h1, s1) = seqs1list[i]
if i >= len(seqs2list):
(h2, s2) = ('', None)
else:
(h2, s2) = seqs2list[i]
if h1 != '' and h2 != '' and cmpseq(s1, s2): #s1 and s2 are the same sequence
curry += 1
else: #s1 and s2 are not the same sequence, check to see if s1 is already in top2, and s2 is already in top1
if h1 != '' and hasSeq(s1, top2):
curry += 1
if h2 != '' and hasSeq(s2, top1):
curry += 1
#Update top1 and top2
if h1 != '':
addSeq2aa2v2j(s1, top1)
if h2 != '':
addSeq2aa2v2j(s2, top2)
i1 = min([i + 1, len(seqs1list)])
i2 = min([i + 1, len(seqs2list)])
currx = i1*i2
xdata.append( currx )
ydata.append(curry)
return xdata, ydata
################## END PAIRWISE OVERLAP PLOT (ROBINS et al.) #####################
#############################################################################
################## DRAW OVERLAP PLOT COMBINED (ALL) #########################
#############################################################################
def countOverlap(seqs1, aa2v2j2):
numSharedSeqs = 0
for h1, s1 in seqs1.iteritems():
if hasSeq(s1, aa2v2j2):
numSharedSeqs += 1
return numSharedSeqs
def drawOverlapPlotAllData(sample2caseYdata, sample2controlYdata, axesList, group2samples, sample2group, group2color, samplesPerPlot):
sampleNames = []
for group in sorted(group2samples.keys()):
sampleNames.extend( group2samples[group] )
markers = ['^', 'o']
markersize = 8.0
lines = []
offset = 0.02 #x offset between data points
miny = float('inf')
maxy = 0
for i in xrange( len(axesList) ):
xlabels = []
axes = axesList[i]
startIndex = i*samplesPerPlot
endIndex = min( [startIndex + samplesPerPlot, len(sampleNames)] )
for j in xrange(startIndex, endIndex):
name = sampleNames[j]
xlabels.append(name)
caseYdata = sample2caseYdata[name]
xdata = [ j - startIndex + offset*k for k in xrange( len(caseYdata) ) ]
color = group2color[ sample2group[name] ][0]
darkcolor = group2color[ sample2group[name] ][1]
l1 = axes.plot(xdata, caseYdata, color= darkcolor, marker=markers[0], markeredgecolor= darkcolor , markersize=markersize, linestyle='none')
controlYdata = sample2controlYdata[name]
#xdata = [ j - startIndex for y in controlYdata ]
xdata = [ j - startIndex + offset*(k + len(caseYdata)) for k in xrange( len(controlYdata) ) ]
l2 = axes.plot(xdata, controlYdata, color= color, marker=markers[1], markeredgecolor=color, markersize=markersize, linestyle='none')
if j == 0:
lines = [l1, l2]
miny = min( [miny, min(caseYdata), min(controlYdata)] )
maxy = max( [maxy, max(caseYdata), max(controlYdata)] )
editSpine(axes)
axes.xaxis.set_ticklabels( xlabels )
numSamples = len(xlabels)
xoffset = offset*(numSamples-2)/2
range = maxy - miny
#Draw vertical grid:
for x in xrange(numSamples - 1):
axes.plot( [x + xoffset + 0.5, x + xoffset + 0.5], [miny - range*0.05, maxy + range*0.05], color="#848484", linestyle='-', linewidth=0.005)
axes.set_xlim(xoffset - 0.5, min([samplesPerPlot, len(sampleNames)]) + xoffset - 0.5 )
axes.xaxis.set_ticks( [ x + xoffset for x in xrange(numSamples) ] )
#HACK:
#yticks = [ float(y)/(10**7) for y in xrange(2, 11, 2) ]
#yticklabels = [ str(y) for y in xrange(2, 11, 2) ]
#yticks = [ float(y)/(10**8) for y in xrange(2, 21, 2) ]
#yticklabels = [ "%.2f" % (y*(10**7)) for y in yticks ]
#axes.yaxis.set_ticks(yticks)
#axes.yaxis.set_ticklabels( yticklabels )
#for l in axes.get_yticklabels():
# l.set_fontsize('medium')
axes.set_ylim( miny - range*0.01 , maxy + range*0.01 )
#axes.set_ylim(-0.00000001 , 0.0000003)
#axes.set_ylim(-0.0000001 , 0.000005)
for label in axes.get_xticklabels():
label.set_fontsize( 'medium' )
label.set_rotation( 45 )
#axes.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
axes.yaxis.grid(b=True, color="#848484", linestyle='-', linewidth=0.005)
if i == 0:
axes.set_title("Shared sequences", size='xx-large')
#HACK
#legend = axes.legend(lines, ["RNA", "DNA"], numpoints=1, loc="best", ncol=1)
legend = axes.legend(lines, ["AS", "Healthy"], numpoints=1, loc="best", ncol=1)
#if i == len(axesList) /2:
# axes.set_ylabel("Ratio of shared sequences to sampling space (x 10^-7)", size='large')
if i == len(axesList) - 1:
axes.set_xlabel("Samples", size='large')
        legend.draw_frame(False)
def drawOverlapPlotAll(samples, sample2aa2v2j, cases, controls, options, group2samples, sample2group):
name2sample = {}
for s in samples:
name2sample[s.name] = s
#Get data
sample2caseYdata = {}
sample2controlYdata = {}
for s1 in samples:#each sample
sample2caseYdata[s1.name] = []
#aa2v2j1 = sample2aa2v2j[s1.name]
for name in cases:
if s1.name == name:
continue
s2 = name2sample[name]
aa2v2j2 = sample2aa2v2j[s2.name]
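            # Normalize the shared-clone count by the product of the two
            # repertoire sizes (the pairwise sampling space).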
y = countOverlap(s1.seqs, aa2v2j2)*1.0/( len(s1.seqs)*len(s2.seqs) )
sample2caseYdata[s1.name].append( y )
sample2controlYdata[s1.name] = []
for name in controls:
if s1.name == name:
continue
s2 = name2sample[name]
aa2v2j2 = sample2aa2v2j[s2.name]
y = countOverlap(s1.seqs, aa2v2j2)*1.0/( len(s1.seqs)*len(s2.seqs) )
sample2controlYdata[s1.name].append( y )
#Draw plot:
options.out = os.path.join(options.outdir, "overlapAll")
fig, pdf = initImage(10.0, 10.0, options)
axesList = setAxes2(fig, len( sample2group.keys() ), options.samplesPerPlot)
colors = getColors6()
colorsDark = getColors6dark()
group2color = {}
for i, group in enumerate( sorted( group2samples.keys() ) ):
group2color[group] = (colors[i + 1], colorsDark[i + 1])
drawOverlapPlotAllData(sample2caseYdata, sample2controlYdata, axesList, group2samples, sample2group, group2color, options.samplesPerPlot)
writeImage(fig, pdf, options)
################## END DRAW OVERLAP PLOT COMBINED (ALL) #########################
def countOverlap_3way(seqs1, aa2v2j2, aa2v2j3):
numSharedSeqs = 0
for h1, s1 in seqs1.iteritems():
if hasSeq(s1, aa2v2j2) and hasSeq(s1, aa2v2j3):
numSharedSeqs += 1
return numSharedSeqs
def drawOverlapPlotAllData_3way(sample2casecaseYdata, sample2controlcontrolYdata, sample2casecontrolYdata, axesList, group2samples, sample2group, group2color, samplesPerPlot):
sampleNames = []
for group in sorted(group2samples.keys()):
sampleNames.extend( group2samples[group] )
markers = ['^', 'o', 'd']
markersize = 8.0
lines = []
offset = 0.02 #x offset between data points
miny = float('inf')
maxy = 0
for i in xrange( len(axesList) ):
xlabels = []
axes = axesList[i]
startIndex = i*samplesPerPlot
endIndex = min( [startIndex + samplesPerPlot, len(sampleNames)] )
numx = 0
for j in xrange(startIndex, endIndex):
name = sampleNames[j]
xlabels.append(name)
lightcolor = group2color[ sample2group[name] ][0]
color = group2color[ sample2group[name] ][1]
darkcolor = group2color[ sample2group[name] ][2]
#casecase
casecaseYdata = sample2casecaseYdata[name]
xdata = [ j - startIndex + offset*k for k in xrange( len(casecaseYdata) ) ]
l1 = axes.plot(xdata, casecaseYdata, color= darkcolor, marker=markers[0], markeredgecolor= darkcolor , markersize=markersize, linestyle='none')
#casecontrol
casecontrolYdata = sample2casecontrolYdata[name]
xdata = [ j - startIndex + offset*(k + len(casecaseYdata)) for k in xrange( len(casecontrolYdata) ) ]
l2 = axes.plot(xdata, casecontrolYdata, color = color, marker=markers[1], markeredgecolor=color, markersize=markersize, linestyle='none')
controlcontrolYdata = sample2controlcontrolYdata[name]
#xdata = [ j - startIndex for y in controlYdata ]
xdata = [ j - startIndex + offset*( k + len(casecontrolYdata) + len(casecaseYdata) ) for k in xrange( len(controlcontrolYdata) ) ]
l3 = axes.plot(xdata, controlcontrolYdata, color= lightcolor, marker=markers[2], markeredgecolor= lightcolor, markersize=markersize, linestyle='none')
if j == 0:
lines = [l1, l2, l3]
miny = min( [miny, min(casecaseYdata), min(casecontrolYdata), min(controlcontrolYdata)] )
maxy = max( [maxy, max(casecaseYdata), max(casecontrolYdata), max(controlcontrolYdata)] )
if numx == 0:
numx = len(casecaseYdata) + len(casecontrolYdata) + len(controlcontrolYdata)
editSpine(axes)
axes.xaxis.set_ticklabels( xlabels )
numSamples = len(xlabels)
xoffset = offset*(numx-1)/2
range = maxy - miny
#Draw vertical grid:
for x in xrange(numSamples - 1):
axes.plot( [x + xoffset + 0.5, x + xoffset + 0.5], [miny - range*0.05, maxy + range*0.05], color="#848484", linestyle='-', linewidth=0.005)
axes.set_xlim(xoffset - 0.5, min([samplesPerPlot, len(sampleNames)]) + xoffset - 0.5 )
axes.xaxis.set_ticks( [ x + xoffset for x in xrange(numSamples) ] )
#HACK:
#yticks = [ float(y)/(10**7) for y in xrange(2, 11, 2) ]
#yticklabels = [ str(y) for y in xrange(2, 11, 2) ]
#yticks = [ float(y)/(10**8) for y in xrange(2, 21, 2) ]
#yticklabels = [ "%.2f" % (y*(10**7)) for y in yticks ]
#axes.yaxis.set_ticks(yticks)
#axes.yaxis.set_ticklabels( yticklabels )
#for l in axes.get_yticklabels():
# l.set_fontsize('medium')
#axes.set_ylim( miny - range*0.01 , maxy + range*0.01 )
for label in axes.get_xticklabels():
label.set_fontsize( 'medium' )
label.set_rotation( 45 )
#axes.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
axes.yaxis.grid(b=True, color="#848484", linestyle='-', linewidth=0.005)
if i == 0:
axes.set_title("Shared sequences", size='xx-large')
#HACK
#legend = axes.legend(lines, ["RNA", "DNA"], numpoints=1, loc="best", ncol=1)
legend = axes.legend(lines, ["AS-AS", "AS-Healthy", "Healthy-Healthy"], numpoints=1, loc="best", ncol=1)
#if i == len(axesList) /2:
# axes.set_ylabel("Ratio of shared sequences to sampling space (x 10^-7)", size='large')
if i == len(axesList) - 1:
axes.set_xlabel("Samples", size='large')
        legend.draw_frame(False)
def drawOverlapPlotAll_3way(samples, sample2aa2v2j, cases, controls, options, group2samples, sample2group):
name2sample = {}
for s in samples:
name2sample[s.name] = s
#for each sample, there are going to be 3 types of data: case-case, case-control, control-control
casecasePairs = []
casecontrolPairs = []
controlcontrolPairs = []
for i in xrange( len(cases) -1 ):
for j in xrange(i + 1, len(cases)):
casecasePairs.append( [cases[i], cases[j]] )
for i in xrange( len(controls) -1 ):
for j in xrange(i + 1, len(controls)):
controlcontrolPairs.append( [controls[i], controls[j]] )
for case in cases:
for control in controls:
casecontrolPairs.append( [case, control] )
#Get data
normFactor = 10**10
sample2casecaseYdata = {}
sample2casecontrolYdata = {}
sample2controlcontrolYdata = {}
for s in samples:#each sample
sample2casecaseYdata[s.name] = []
for [name1, name2] in casecasePairs:
if s.name == name1 or s.name == name2:
continue
s1 = name2sample[name1]
s2 = name2sample[name2]
aa2v2j1 = sample2aa2v2j[s1.name]
aa2v2j2 = sample2aa2v2j[s2.name]
y = countOverlap_3way(s.seqs, aa2v2j1, aa2v2j2)
y = y*1.0*normFactor/( len(s.seqs)*len(s1.seqs)*len(s2.seqs) )
sample2casecaseYdata[s.name].append( y )
sample2controlcontrolYdata[s.name] = []
for [name1, name2] in controlcontrolPairs:
if s.name == name1 or s.name == name2:
continue
s1 = name2sample[name1]
s2 = name2sample[name2]
aa2v2j1 = sample2aa2v2j[s1.name]
aa2v2j2 = sample2aa2v2j[s2.name]
y = countOverlap_3way(s.seqs, aa2v2j1, aa2v2j2)
y = y*1.0*normFactor/( len(s.seqs)*len(s1.seqs)*len(s2.seqs) )
sample2controlcontrolYdata[s.name].append( y )
sample2casecontrolYdata[s.name] = []
for [name1, name2] in casecontrolPairs:
if s.name == name1 or s.name == name2:
continue
s1 = name2sample[name1]
s2 = name2sample[name2]
aa2v2j1 = sample2aa2v2j[s1.name]
aa2v2j2 = sample2aa2v2j[s2.name]
y = countOverlap_3way(s.seqs, aa2v2j1, aa2v2j2)
y = y*1.0*normFactor/( len(s.seqs)*len(s1.seqs)*len(s2.seqs) )
sample2casecontrolYdata[s.name].append( y )
#Draw plot:
options.out = os.path.join(options.outdir, "overlapAll_3way")
fig, pdf = initImage(10.0, 10.0, options)
axesList = setAxes2(fig, len( sample2group.keys() ), options.samplesPerPlot)
colors = getColors6()
colorsDark = getColors6dark()
colorsLight = getColors6light()
group2color = {}
for i, group in enumerate( sorted( group2samples.keys() ) ):
group2color[group] = (colorsLight[i+1], colors[i + 1], colorsDark[i + 1])
drawOverlapPlotAllData(sample2casecaseYdata, sample2controlcontrolYdata, axesList, group2samples, sample2group, group2color, options.samplesPerPlot)
#drawOverlapPlotAllData_3way(sample2casecaseYdata, sample2controlcontrolYdata, sample2casecontrolYdata, axesList, group2samples, sample2group, group2color, options.samplesPerPlot)
writeImage(fig, pdf, options)
################## END DRAW OVERLAP PLOT COMBINED (ALL) 3 WAYS #########################
################# GET FASTA FILES OF UNIQUE SEQUENCES, SHARED WITH SAME GROUP SEQUENCES, SHARED WITH OTHER GROUPS SEQUENCES, SHARED SEQUENCES ###################
def convertHeaderNt2aa(header):
items = header.split('|')
aa = nt2aa(items[0])
return '|'.join( [ aa, items[1], items[2] ] )
def getSeq2sample2count(samples, nt2aa): #key = seq|vs|js, val = {sample: [(count, freq), ...]}
seq2sample2count = {}
for sample in samples:
for header, seq in sample.seqs.iteritems():
if nt2aa:
header = convertHeaderNt2aa(header)
if header not in seq2sample2count:
seq2sample2count[header] = { sample.name: [ (seq.count, seq.freq) ] }
else:
if sample.name not in seq2sample2count[header]:
seq2sample2count[header][sample.name] = [ (seq.count, seq.freq) ]
else:
seq2sample2count[header][sample.name].append( (seq.count, seq.freq) )
return seq2sample2count
def uniqAndSharedSequences(outdir, sample, seq2sample2count, group2samples, group, nt2aa):
uf = open( os.path.join(outdir, 'uniq.fa'), 'w' )
sgf = open( os.path.join(outdir, 'sameGroupShared.fa'), 'w' )
dgf = open( os.path.join(outdir, 'diffGroupShared.fa'), 'w' )
sf = open( os.path.join(outdir, 'shared.fa'), 'w' )
total = sum([ seq.count for seq in sample.seqs.values()])
ucount = 0
sgcount = 0
dgcount = 0
scount = 0
for header, seq in sample.seqs.iteritems():
if nt2aa:
header = convertHeaderNt2aa(header)
if header not in seq2sample2count:
raise ValueError("Sequence %s of sample %s is not in seq2sample2count\n" %(header, sample.name))
sample2count = seq2sample2count[header]
fastaheader = "%s;freq=%f|%s|%s;size=%d" %(sample.name, seq.freq, ','.join(seq.vs), ','.join(seq.js), seq.count)
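        # Hypothetical example of the header layout (values are illustrative only):
        #   sampleA;freq=0.000120|TRBV9|TRBJ2-7;size=42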
samegroup = False
diffgroup = False
#Search to see if current sequence present in other samples of the same group and of different group(s)
for g, samples in group2samples.iteritems():
if g == group: #same group
for s in samples:
if s != sample.name and s in sample2count:
samegroup = True
break
else: #diff group
for s in samples:
if s != sample.name and s in sample2count:
diffgroup = True
break
if samegroup or diffgroup:
sf.write(">%s\n" % fastaheader)
sf.write("%s\n" % seq.seq)
scount += seq.count
if samegroup and not diffgroup:
sgf.write(">%s\n" % fastaheader)
sgf.write("%s\n" % seq.seq)
sgcount += seq.count
if diffgroup:
dgf.write(">%s\n" % fastaheader)
dgf.write("%s\n" % seq.seq)
dgcount += seq.count
else: #unique sequence
uf.write(">%s\n" % fastaheader)
uf.write("%s\n" % seq.seq)
ucount += seq.count
uf.write(">%s;others||;size=%d\n%s\n" %(sample.name, total - ucount, "OTHERS"))
sgf.write(">%s;others||;size=%d\n%s\n" %(sample.name, total - sgcount, "OTHERS"))
dgf.write(">%s;others||;size=%d\n%s\n" %(sample.name, total - dgcount, "OTHERS"))
sf.write(">%s;others||;size=%d\n%s\n" %(sample.name, total - scount, "OTHERS"))
uf.close()
sgf.close()
dgf.close()
sf.close()
################# END OF GET FASTA FILES OF UNIQUE SEQUENCES, SHARED WITH SAME GROUP SEQUENCES, SHARED WITH OTHER GROUPS SEQUENCES, SHARED SEQUENCES ###################
################## TTEST ######################
#ttest to test the hypothesis: cases have more shared sequences than a case and a control or two controls.
#Calculate the below shared sequences stats for all posible pairs of (case-case) and compare with stats of all pairs of (case-control) and (control-control)
#For each pair, the stats is calculated as follow:
#Steps involved: 1/ randomly select N (ttestSamplingSize) uniq sequences from each sample
# 2/ calculate the number of shared sequences between the two samples
# 3/ Repeat (1) and (2) M times (M = ttestSamplingNum)
# 4/ Take the average of all the samplings
def getAvrSharedSeqs(sample1, sample2, samplingSize, numSamplings):
stime = time.clock()
data = []
maxTotal = max( [ sum([seq.count for seq in sample.seqs.values()]) for sample in [sample1, sample2] ] )
for i in xrange(numSamplings): #each sampling:
samplingtime = time.clock()
s1 = sample1
s2 = sample2
sys.stderr.write("%s, %s\n" %(s1.name, s2.name))
if samplingSize > 0:
s1 = samplingSample_uniq(sample1, samplingSize, maxTotal)
s2 = samplingSample_uniq(sample2, samplingSize, maxTotal)
#seqs1 = sample1.seqs.values()
#seqs2 = sample2.seqs.values()
seqs1 = s1.seqs
seqs2 = s2.seqs
sys.stderr.write("Number of uniq sequences in sample %s : %d, %s: %d\n" %(s1.name, len(seqs1), s2.name, len(seqs2)) )
shared = 0
for seq1 in seqs1:
if seq1 in seqs2:
shared += 1
data.append(shared)
sys.stderr.write("One sampling in %f s.\n" %(time.clock() - samplingtime))
sys.stderr.write("Calculate shared sequences for %d samplings took %f seconds.\n" %(numSamplings, time.clock() - stime))
return np.mean(data)
def ttest( outfile, samples, cases, controls, samplingSize, numSamplings, mode ):
    #if mode == 1: group1 includes case-case pairs; group2 includes control-control and control-case pairs
    #elif mode == 2: group1 includes case-case and control-control pairs; group2 includes control-case pairs
#
if len(cases) < 2 or len(controls) == 0 :
raise ValueError("Cannot perform ttest with < 2 cases or 0 control.\n")
casePairs = [] #stats of case-case pairs
controlPairs = [] #stats of case-control and control-control pairs
caseNames = []
controlNames = []
    #Convert samples to a hash for easy look up:
name2sample = {} #key = name, val = Sample
for s in samples:
name2sample[s.name] = s
#Case-case pairs:
for i in xrange( len(cases) -1 ):
case1 = cases[i]
sample1 = name2sample[case1]
for j in xrange(i+1, len(cases)):
case2 = cases[j]
#HACK
#if sample2patient[case1] == sample2patient[case2]:
# continue
sample2 = name2sample[case2]
sys.stderr.write("Sample1: %s, sample2: %s\n" %(sample1.name, sample2.name))
casePairs.append( getAvrSharedSeqs(sample1, sample2, samplingSize, numSamplings) )
caseNames.append("%s-%s" %(case1, case2))
#Control-control pairs:
for i in xrange( len(controls) -1 ):
control1 = controls[i]
sample1 = name2sample[control1]
for j in xrange(i+1, len(controls)):
control2 = controls[j]
#HACK
#if sample2patient[control1] == sample2patient[control2]:
# continue
sample2 = name2sample[control2]
if mode == 1:
controlPairs.append( getAvrSharedSeqs(sample1, sample2, samplingSize, numSamplings) )
controlNames.append("%s-%s" %(control1, control2))
elif mode == 2:
casePairs.append( getAvrSharedSeqs(sample1, sample2, samplingSize, numSamplings) )
caseNames.append("%s-%s" %(control1, control2))
#Control-case pairs:
for control in controls:
sample1 = name2sample[control]
for case in cases:
sample2 = name2sample[case]
controlPairs.append( getAvrSharedSeqs(sample1, sample2, samplingSize, numSamplings) )
controlNames.append("%s-%s" %(control, case))
#ttest:
stime = time.clock()
tval, pval = ttest_ind(casePairs, controlPairs)
sys.stderr.write("ttest after gotten the data in %f s\n" %(time.clock() - stime))
f = open(outfile, 'w')
f.write("#Group 1: %s\n" %(','.join(caseNames) ) )
f.write("#Group 2: %s\n" %(','.join(controlNames) ) )
f.write("#Pval\tTval\tMean1\tStd1\tMean2\tStd2\n")
f.write("%f\t%f\t%f\t%f\t%f\t%f\n" %(pval, tval, np.mean(casePairs), np.std(casePairs), np.mean(controlPairs), np.std(controlPairs) ))
f.close()
################## END TTEST ##################
################ OLD MAIN, TO BE DELETED #############
def main():
starttime = time.clock()
parser = initOptions()
addOptions(parser)
initPlotOptions(parser)
options, args = parser.parse_args()
checkOptions(parser, args, options)
checkPlotOptions(options, parser)
stime = time.clock() - starttime
if options.verbose:
sys.stderr.write("Reading and checking options in %d seconds.\n" %stime)
sys.stderr.write("Reading in input files...\n")
stime = time.clock()
samples = readfiles(options.indir, options.minCount, 1)
sample2aa2v2j = getsample2aa2v2j(samples)
#if options.nt2aa:
# transNt2aa(samples)
if options.verbose:
sys.stderr.write("Read input files in %f seconds.\n" %(time.clock() - stime))
if options.sampling > 0:
print options.sampling
stime = time.clock()
samples = sampling(samples, options.sampling, options.samplingUniq)
if options.verbose:
sys.stderr.write("Done sampling in %f seconds.\n" %(time.clock() - stime) )
if options.overlapPlot:
overlapPlotDir = os.path.join(options.outdir, "overlapPlots")
system("mkdir -p %s" %(overlapPlotDir))
if not options.noPairwiseOverlap:
statdir = os.path.join(options.outdir, "overlapStats")
system("mkdir -p %s" %statdir)
###########################################
#Pairwise overlap statistics and sequences#
###########################################
if options.verbose:
sys.stderr.write("Pairwise overlap statistics and fasta, if specified:\n")
pairwiseTime = time.clock()
for i in xrange( len(samples) - 1 ):
sample1 = samples[i]
aa2v2j1 = sample2aa2v2j[ sample1.name ]
for j in xrange(i+1, len(samples)):
sample2 = samples[j]
aa2v2j2 = sample2aa2v2j[ sample2.name ]
pair = "%s_%s" %(sample1.name, sample2.name)
if not options.noPairwiseOverlap:
if options.verbose:
sys.stderr.write("Getting pairwise overlap statistics for samples %s and %s.\n" %(sample1.name, sample2.name))
stime = time.clock()
outfile = os.path.join(statdir, "%s.txt" %pair)
reads1, reads2, clones1, clones2, stats1, stats2 = getPairwiseOverlap(sample1.seqs, sample2.seqs, aa2v2j1, aa2v2j2, options.cutoffs, options.mode, options.discrete)
printPairwiseOverlap(reads1, reads2, clones1, clones2, stats1, stats2, options.cutoffs, outfile)
if options.verbose:
sys.stderr.write("Done in %f s.\n" %(time.clock() - stime))
if options.fasta:
if options.verbose:
sys.stderr.write("Print fasta of the pairwise overlap sequences between samples %s and %s\n" %(sample1.name, sample2.name))
stime = time.clock()
fastaDir = os.path.join(options.outdir, "pairwiseSharedSeqs")
system("mkdir -p %s" %fastaDir)
printPairwiseOverlapSequences(sample1.name, sample2.name, sample1.seqs, sample2.seqs, aa2v2j1, aa2v2j2, fastaDir, options.cutoffs, options.mode, options.discrete)
if options.verbose:
sys.stderr.write("Done in %f s.\n" %(time.clock() - stime))
if options.verbose:
sys.stderr.write("Done Pairwise Overlap Statistics and Fasta in %f s.\n" %(time.clock() - pairwiseTime))
############################
#HARLAN ROBINS OVERLAP PLOT
    #(Y axis = number of shared sequences, X axis = n1j x n2j where nij (i=1,2) is the number of sequences of sample i in the top j clones.)
############################
if options.verbose:
sys.stderr.write("Starting overlapPlot if specified.\n")
plottime = time.clock()
if options.overlapPlot:
sampleNames = sorted( [sample.name for sample in samples] )
sam2color = sample2color(sampleNames)
for i in xrange( len(samples) ):
sample1 = samples[i]
aa2v2j1 = sample2aa2v2j[sample1.name]
data = []
labels = []
for j in xrange(len(samples) ):
if i == j:
continue
sample2 = samples[j]
aa2v2j2 = sample2aa2v2j[sample2.name]
#Robins overlap plot:
stime = time.clock()
xdata, ydata = pairwiseOverlapPlot(sample1.seqs, sample2.seqs, options.overlapPlotRandom)
data.append( [xdata, ydata] )
labels.append(sample2.name)
if options.verbose:
sys.stderr.write("Calculated data for overlap plot of pair %s - %s in %f seconds\n" %(sample1.name, sample2.name, time.clock() - stime))
#ofile = os.path.join(overlapPlotDir, '%s-%s' %(sample1.name, sample2.name) )
#drawOverlapPlot([ [xdata, ydata] ], ofile, [ "%s-%s" %(sample1.name, sample2.name)], options)
ofile = os.path.join(overlapPlotDir, '%s' %(sample1.name) )
drawOverlapPlot(data, ofile, labels, options, sam2color)
if options.verbose:
sys.stderr.write("Done OverlapPlot in %f s.\n" %(time.clock() - plottime))
############################
##DONE HARLAN ROBINS PLOTS##
############################
#################################
#====== IF GROUP2SAMPLEs =======#
#################################
uniqtime = time.clock()
if options.verbose:
sys.stderr.write("Getting uniq and shared sequences if requested...\n")
#if options.group2samples:
if options.group2samples:
group2samples, sample2group = readGroup2samples(options.group2samples)
if options.uniqAndSharedSeqs:
seq2sample2count = getSeq2sample2count(samples, options.nt2aa) #key = seq, val = {sample: (count, freq)}
for sample in samples:
outdir = os.path.join(options.outdir, "uniqAndSharedSeqs", sample.name)
system("mkdir -p %s" %outdir)
stime = time.clock()
uniqAndSharedSequences(outdir, sample, seq2sample2count, group2samples, sample2group[sample.name], options.nt2aa)
if options.verbose:
sys.stderr.write("uniq and shared sequences for sample %s in %f seconds.\n" %(sample.name, time.clock() - stime))
if options.verbose:
sys.stderr.write("Done uniq and shared sequences in %f seconds.\n" %(time.clock() - uniqtime))
###############################
#=== TTEST ===
###############################
if options.ttest or options.overlapPlotAll:
sep = ','
cases = readList(options.cases, sep)
controls = readList(options.controls, sep)
if options.ttest:
ttesttime = time.clock()
if options.verbose:
sys.stderr.write("Ttest...\n")
ttestOutfile = os.path.join(options.outdir, "ttest.txt")
ttest( ttestOutfile, samples, cases, controls, options.ttestSamplingSize, options.ttestSamplingNum, options.ttestMode )
if options.verbose:
sys.stderr.write("Done in %f s\n" %(time.clock() - ttesttime))
###############################
#==== COMBINE OVERLAP PLOT ===#
###############################
if options.overlapPlotAll:
drawOverlapPlotAll(samples, sample2aa2v2j, cases, controls, options, group2samples, sample2group)
#drawOverlapPlotAll_3way(samples, sample2aa2v2j, cases, controls, options, group2samples, sample2group)
| mit |
jorik041/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
yanlend/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
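# Illustrative sketch (not part of the original benchmark): with a deliberately
# tiny hash space, distinct tokens can collide, so the number of non-zero
# columns may be smaller than the number of unique terms -- the discrepancy
# mentioned in the module docstring. The function name and tokens below are
# assumptions made purely for this example.
def _tiny_collision_example():
    hasher = FeatureHasher(n_features=8, input_type="string")
    X = hasher.transform([["alpha", "beta", "gamma", "delta", "epsilon"]])
    return n_nonzero_columns(X)  # can be < 5 when tokens hash to the same column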
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/datasets/__init__.py | 15 | 3741 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
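A minimal usage sketch (illustrative only)::
    from sklearn.datasets import load_iris
    iris = load_iris()
    print(iris.data.shape, iris.target.shape)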
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
Vimos/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
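# the KDE was fitted (and sampled) in the 15-dimensional PCA space, so the samples
# are projected back to the original 64-dimensional pixel space before plotting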
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 67 | 1990 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example illustrating multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
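# add noise to the two targets of every 5th sample (100 samples -> 20 noisy rows)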
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 25
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s,
edgecolor="black", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s,
edgecolor="black", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="red", s=s,
edgecolor="black", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s,
edgecolor="black", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Parallel-in-Time/pySDC | pySDC/playgrounds/deprecated/pmesh/visualize_Temperature.py | 1 | 3225 | import json
import glob
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import imageio
def plot_data(name=''):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
Produces one png file per time-step, combine as movie via e.g.
> ffmpeg -i data/name_%08d.png name.mp4
Args:
name (str): name of the simulation (expects data to be in data path)
"""
json_files = sorted(glob.glob(f'./data/{name}_*.json'))
data_files = sorted(glob.glob(f'./data/{name}_*.dat'))
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
index = json_file.split('_')[1].split('.')[0]
print(f'Working on step {index}...')
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
plt.figure()
plt.imshow(array[..., 0], vmin=0, vmax=1)
plt.colorbar()
plt.title(f"Field - Time: {obj['time']:6.4f}")
plt.savefig(f'data/{name}_field_{index}.png', bbox_inches='tight')
plt.close()
plt.figure()
plt.imshow(array[..., 1], vmin=0, vmax=1)
plt.colorbar()
plt.title(f"Temperature - Time: {obj['time']:6.4f}")
plt.savefig(f'data/{name}_temperature_{index}.png', bbox_inches='tight')
plt.close()
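# Hypothetical usage (assuming matching <name>_*.json / <name>_*.dat files exist in ./data):
#     plot_data(name='AC-temperature-test')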
def make_gif(name=''):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
Produces one png file per time-step, combine as movie via e.g.
> ffmpeg -i data/name_%08d.png name.mp4
Args:
name (str): name of the simulation (expects data to be in data path)
"""
json_files = sorted(glob.glob(f'./data/{name}_*.json'))
data_files = sorted(glob.glob(f'./data/{name}_*.dat'))
img_list = []
c = 0
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
index = json_file.split('_')[1].split('.')[0]
print(f'Working on step {index}...')
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
fig, ax = plt.subplots(1,2)
ax[0].imshow(array[..., 1], vmin=0, vmax=1)
ax[1].imshow(array[..., 0], vmin=0, vmax=1)
# ax.set_colorbar()
ax[0].set_title(f"Temperature - Time: {obj['time']:6.4f}")
ax[1].set_title(f"Field - Time: {obj['time']:6.4f}")
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
img_list.append(image.reshape(fig.canvas.get_width_height()[::-1] + (3,)))
plt.close()
# c +=1
# if c == 3:
# break
# imageio.mimsave('./test.gif', img_list, fps=8, subrectangles=True)
imageio.mimsave('./test.mp4', img_list, fps=8)
if __name__ == "__main__":
# name = 'AC-test'
name = 'AC-temperature-test'
# name = 'AC-2D-application'
# name = 'AC-2D-application-forced'
# plot_data(name=name)
make_gif(name=name)
| bsd-2-clause |
alekz112/statsmodels | statsmodels/examples/ex_regressionplots.py | 34 | 4457 | # -*- coding: utf-8 -*-
"""Examples for Regression Plots
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import statsmodels.graphics.regressionplots as smrp
#example from tut.ols with changes
#fix a seed for these examples
np.random.seed(9876789)
# OLS non-linear curve but linear in parameters
# ---------------------------------------------
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate only linear function, misspecified because of non-linear terms
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
# plt.figure()
# plt.plot(x1, y, 'o', x1, y_true, 'b-')
res = sm.OLS(y, exog0).fit()
#print res.params
#print res.bse
plot_old = 0 #True
if plot_old:
#current bug predict requires call to model.results
#print res.model.predict
prstd, iv_l, iv_u = wls_prediction_std(res)
plt.plot(x1, res.fittedvalues, 'r-o')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('blue: true, red: OLS')
plt.figure()
plt.plot(res.resid, 'o')
plt.title('Residuals')
fig2 = plt.figure()
ax = fig2.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.resid, 'o')
ax.set_title('residuals versus exog')# + namestr)
ax = fig2.add_subplot(2,1,2)
plt.plot(x2, res.resid, 'o')
fig3 = plt.figure()
ax = fig3.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted values versus exog')# + namestr)
ax = fig3.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues, 'o')
fig4 = plt.figure()
ax = fig4.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
plt.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted values plus residuals versus exog')# + namestr)
ax = fig4.add_subplot(2,1,2)
plt.plot(x2, res.fittedvalues + res.resid, 'o')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
fig5 = plt.figure()
ax = fig5.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
res1a = sm.OLS(y, exog0[:,[0,2]]).fit()
res1b = sm.OLS(x1, exog0[:,[0,2]]).fit()
plt.plot(res1b.resid, res1a.resid, 'o')
res1c = sm.OLS(res1a.resid, res1b.resid).fit()
plt.plot(res1b.resid, res1c.fittedvalues, '-')
ax.set_title('Partial Regression plot')# + namestr)
ax = fig5.add_subplot(2,1,2)
#plt.plot(x2, res.fittedvalues + res.resid, 'o')
res2a = sm.OLS(y, exog0[:,[0,1]]).fit()
res2b = sm.OLS(x2, exog0[:,[0,1]]).fit()
plt.plot(res2b.resid, res2a.resid, 'o')
res2c = sm.OLS(res2a.resid, res2b.resid).fit()
plt.plot(res2b.resid, res2c.fittedvalues, '-')
# see http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
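# CCPR (component and component-plus-residual) plot: for each regressor x_i, plot
# x_i * beta_i and x_i * beta_i + residuals against x_i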
fig6 = plt.figure()
ax = fig6.add_subplot(2,1,1)
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*res.params[1]
x2beta = x2*res.params[2]
plt.plot(x1, x1beta + res.resid, 'o')
plt.plot(x1, x1beta, '-')
ax.set_title('X_i beta_i plus residuals versus exog (CCPR)')# + namestr)
ax = fig6.add_subplot(2,1,2)
plt.plot(x2, x2beta + res.resid, 'o')
plt.plot(x2, x2beta, '-')
#print res.summary()
doplots = 1
if doplots:
fig1 = smrp.plot_fit(res, 0, y_true=None)
smrp.plot_fit(res, 1, y_true=None)
smrp.plot_partregress_grid(res, exog_idx=[0,1])
smrp.plot_regress_exog(res, exog_idx=0)
smrp.plot_ccpr(res, exog_idx=0)
smrp.plot_ccpr_grid(res, exog_idx=[0,1])
from statsmodels.graphics.tests.test_regressionplots import TestPlot
tp = TestPlot()
tp.test_plot_fit()
fig1 = smrp.plot_partregress_grid(res, exog_idx=[0,1])
#add lowess
ax = fig1.axes[0]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
ax = fig1.axes[1]
y0 = ax.get_lines()[0]._y
x0 = ax.get_lines()[0]._x
lres = sm.nonparametric.lowess(y0, x0, frac=0.2)
ax.plot(lres[:,0], lres[:,1], 'r', lw=1.5)
#plt.show()
| bsd-3-clause |
jdavidrcamacho/Tests_GP | 03 - RV tests/Tests.py | 1 | 6550 | # -*- coding: utf-8 -*-
import Gedi as gedi
import numpy as np
import matplotlib.pylab as pl
##### INITIAL DATA ###########################################################
#np.random.seed(12345)
x = 10 * np.sort(np.random.rand(101))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
###############################################################################
#TESTS FOR THE LIKELIHOOD AND GRADIENT
print 'test 1'
#this sets the kernel
kernel1=gedi.kernel.ExpSquared(11.0,7.0)
#this calculates the kernel's log likelihood
kernel1_test1= gedi.kernel_likelihood.likelihood(kernel1,x,y,yerr)
#this calculates the gradients of the parameters
kernel1_test2= gedi.kernel_likelihood.gradient_likelihood(kernel1,x,y,yerr)
print 'kernel =',kernel1
print 'likelihood =',kernel1_test1
print 'gradients =',kernel1_test2
print 'test 2'
kernel2=gedi.kernel.ExpSineSquared(10.1,1.2,5.1)
kernel2_test1= gedi.kernel_likelihood.likelihood(kernel2,x,y,yerr)
kernel2_test2= gedi.kernel_likelihood.gradient_likelihood(kernel2,x,y,yerr)
print 'kernel =',kernel2
print 'likelihood =',kernel2_test1
print 'gradients =',kernel2_test2
print 'test 3'
kernel3=gedi.kernel.ExpSquared(10.2,7.1)+gedi.kernel.ExpSineSquared(10.1,1.2,5.1)
kernel3_test1= gedi.kernel_likelihood.likelihood(kernel3,x,y,yerr)
kernel3_test2= gedi.kernel_likelihood.gradient_likelihood(kernel3,x,y,yerr)
print 'kernel =',kernel3
print 'likelihood =',kernel3_test1
print 'gradients =',kernel3_test2
print 'test 3.5'
kernel3=gedi.kernel.ExpSquared(10.2,7.1)*gedi.kernel.ExpSineSquared(10.1,1.2,5.1) \
*gedi.kernel.WhiteNoise(1.0)
kernel3_test1= gedi.kernel_likelihood.likelihood(kernel3,x,y,yerr)
kernel3_test2= gedi.kernel_likelihood.gradient_likelihood(kernel3,x,y,yerr)
print 'kernel =',kernel3
print 'likelihood =',kernel3_test1
print 'gradients =',kernel3_test2
print 'test 4'
kernel4=gedi.kernel.ExpSquared(10.2,7.1)*gedi.kernel.ExpSineSquared(10.1,1.2,5.1)
kernel4_test1= gedi.kernel_likelihood.likelihood(kernel4,x,y,yerr)
kernel4_test2= gedi.kernel_likelihood.gradient_likelihood(kernel4,x,y,yerr)
print 'kernel =',kernel4
print 'likelihood =',kernel4_test1
print 'gradients =',kernel4_test2
print '#####################################'
###############################################################################
#TESTS FOR THE OPTIMIZATION
print 'test 5 - optimization'
#this sets the initial kernel
kernel1=gedi.kernel.Exponential(10.0,1.0)+gedi.kernel.WhiteNoise(1.0)
print 'kernel 1 ->', kernel1
#this calculates the initial log likelihood
likelihood1=gedi.kernel_likelihood.likelihood(kernel1,x,y,yerr)
print 'likelihood 1 ->', likelihood1
#this performs the optimization
optimization1=gedi.kernel_optimization.committed_optimization(kernel1,x,y,yerr,max_opt=2)
#it returns optimization[0]=final log likelihood optimization[1]=final kernel
print 'kernel 1 final ->',optimization1[1]
print 'likelihood 1 final ->', optimization1[0]
print '#####################################'
################################################################################
##TESTS FOR GRAPHICS
#print 'test 6 - everything combined'
#kernel2=gedi.kernel.ExpSineSquared(10.0,1.0,10.0)+gedi.kernel.Exponential(5.0,1.5)
#print 'kernel =',kernel2
#
#xcalc=np.linspace(-1,11,300)
#
##computation of the initial mean and standard deviation
#[mu,std]=gedi.kernel_likelihood.compute_kernel(kernel2,x,xcalc,y,yerr)
#pl.figure()
#pl.fill_between(xcalc, mu+std, mu-std, color="k", alpha=0.1)
#pl.plot(xcalc, mu+std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu-std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu, color="k", alpha=1, lw=0.5)
#pl.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
#pl.title("Pre-optimization")
#pl.xlabel("$x$")
#pl.ylabel("$y$")
#
##run of the optimization algorithms
#optimization1=gedi.kernel_optimization.committed_optimization(kernel2,x,y,yerr,max_opt=10)
#print 'final kernel = ',optimization1[1]
#print 'final likelihood = ', optimization1[0]
#
##computation of the final mean and standard deviation
#[mu,std]=gedi.kernel_likelihood.compute_kernel(optimization1[1],x,xcalc,y,yerr)
#pl.figure()
#pl.fill_between(xcalc, mu+std, mu-std, color="k", alpha=0.1)
#pl.plot(xcalc, mu+std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu-std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu, color="k", alpha=1, lw=0.5)
#pl.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
#pl.title('Pos-optimization')
#pl.xlabel("$x$")
#pl.ylabel("$y$")
#
#print '#####################################'
#
################################################################################
##TESTS FOR MCMC
#print 'test 7 - mcmc'
#
##definition of the initial kernel, the values you put in here are not important
#kernel3=gedi.kernel.ExpSineSquared(10,1,10) + gedi.kernel.WhiteNoise(10.0)
#print 'initial kernel =',kernel3
#kernel3_test1= gedi.kernel_likelihood.likelihood(kernel3,x,y,yerr)
#print 'initial likelihood =',kernel3_test1
#
##the important thing is the interval of the parameters, as it will create the initial
##guess, in this example we believe the amplitude of the ExpSineSquared is
#somewhere between 5 and 15, the length scale between 1 and 4, the period
#between 5 and 10 and the white noise amplitude between 0.1 and 1.
#parameters=[[5.0,15.0],[1.0,4.0],[5.0,10.0],[0.1,1]]
#
##we set the number of runs we want the algorithm to have
#runs=10000
#
##lets run our mcmc
#trial=gedi.kernel_mcmc.MCMC(kernel3,x,y,yerr,parameters,runs)
#
##now lets make graphics of the results
#xt=np.linspace(0,runs,runs)
#pl.figure()
#pl.title('log marginal likelihood')
#pl.plot(xt,trial[2],'k-')
#
#f, axarr = pl.subplots(2, 2)
#axarr[0, 0].plot(xt, trial[3][0])
#axarr[0, 0].set_title('amplitude')
#axarr[0, 1].plot(xt, trial[3][1])
#axarr[0, 1].set_title('length scale')
#axarr[1, 0].plot(xt, trial[3][2])
#axarr[1, 0].set_title('period')
#axarr[1, 1].plot(xt, trial[3][3])
#axarr[1, 1].set_title('white noise')
#pl.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
#
## computation of the final kernel
#xcalc=np.linspace(-1,11,300)
#[mu,std]=gedi.kernel_likelihood.compute_kernel(trial[0],x,xcalc,y,yerr)
#pl.figure() #Graphics
#pl.fill_between(xcalc, mu+std, mu-std, color="k", alpha=0.1)
#pl.plot(xcalc, mu+std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu-std, color="k", alpha=1, lw=0.25)
#pl.plot(xcalc, mu, color="k", alpha=1, lw=0.5)
#pl.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
#pl.title('After the mcmc')
#pl.xlabel("$time$")
#pl.ylabel("$y$")
#
#print 'final kernel =',trial[0]
#print 'final likelihood =',trial[1] | mit |
eadains09/scripts | sonogram.py | 1 | 5107 | #!/usr/bin/env python
# plot the waveform and a sonogram for an audio input (e.g. a bird song).
from pylab import *
from matplotlib import *
import wave
import sys
# modified specgram()
# http://stackoverflow.com/questions/19468923/cutting-of-unused-frequencies-in-specgram-matplotlib
def my_specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, minfreq = None, maxfreq = None, **kwargs):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, minfreq = None, maxfreq = None, **kwargs)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
*minfreq, maxfreq*
Limits y-axis. Both required
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
#####################################
# modified axes.specgram() to limit
# the frequencies plotted
#####################################
# this will fail if there isn't a current axis in the global scope
ax = gca()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
# modified here
#####################################
if minfreq is not None and maxfreq is not None:
Pxx = Pxx[(freqs >= minfreq) & (freqs <= maxfreq)]
freqs = freqs[(freqs >= minfreq) & (freqs <= maxfreq)]
#####################################
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = ax.imshow(Z, cmap, extent=extent, **kwargs)
ax.axis('auto')
return Pxx, freqs, bins, im
def sonogram(wav_file, startsecs=None, endsecs=None):
'''Plot a sonogram for the given file,
optionally specifying the start and end time in seconds.
'''
wav = wave.open(wav_file, 'r')
frames = wav.readframes(-1)
frame_rate = wav.getframerate()
chans = wav.getnchannels()
secs = wav.getnframes() / float(frame_rate)
    sound_info = fromstring(frames, 'Int16')  # fromstring is provided by the pylab star import
wav.close()
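    # Note: readframes() returns interleaved samples for multi-channel audio, which is
    # why positions and the time axis below are scaled by frame_rate * chans.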
# The wave module doesn't have any way to read just part of a wave file
# (sigh), so we have to take an array slice after we've already read
# the whole thing into numpy.
if startsecs or endsecs:
if not startsecs:
startsecs = 0.0
if not endsecs:
endsecs = secs - startsecs
        startpos = int(startsecs * frame_rate * chans)
        endpos = int(endsecs * frame_rate * chans)
sound_info = sound_info[startpos:endpos]
secs = endsecs - startsecs
else:
startsecs = 0.0
print secs, "seconds"
t = arange(startsecs, startsecs + secs, 1.0 / frame_rate / chans)
ax1 = subplot(211)
title(wav_file)
plot(t, sound_info)
subplot(212, sharex=ax1)
Pxx, freqs, bins, im = my_specgram(sound_info, Fs=frame_rate*chans,
# cmap=cm.Accent,
minfreq = 0, maxfreq = 10000)
show()
close()
if __name__ == '__main__':
filename = sys.argv[1]
start = None
end = None
if len(sys.argv) > 2:
start = float(sys.argv[2])
print "Starting at", start
if len(sys.argv) > 3:
end = float(sys.argv[3])
print "ending at", end
sonogram(filename, start, end)
| gpl-2.0 |
BiaDarkia/scikit-learn | examples/cluster/plot_cluster_iris.py | 56 | 2815 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = [('k_means_iris_8', KMeans(n_clusters=8)),
('k_means_iris_3', KMeans(n_clusters=3)),
('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,
init='random'))]
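# the last estimator uses a single run (n_init=1) with random centroid seeds to
# illustrate the effect of a bad initialization described in the docstring above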
fignum = 1
titles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']
for name, est in estimators:
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2],
c=labels.astype(np.float), edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title(titles[fignum - 1])
ax.dist = 12
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean(),
X[y == label, 2].mean() + 2, name,
horizontalalignment='center',
bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title('Ground Truth')
ax.dist = 12
fig.show()
| bsd-3-clause |
jakevdp/mpld3 | setup.py | 2 | 1977 | import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from _mpld3_setup import (require_clean_submodules, UpdateSubmodules,
check_js_build_status, BuildJavascript, get_version)
DESCRIPTION = "D3 Viewer for Matplotlib"
LONG_DESCRIPTION = open('README.md').read()
NAME = "mpld3"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "[email protected]"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "[email protected]"
URL = 'http://mpld3.github.com'
DOWNLOAD_URL = 'http://github.com/jakevdp/mpld3'
LICENSE = 'BSD 3-clause'
VERSION = get_version()
# Make sure submodules are updated and synced
root_dir = os.path.abspath(os.path.dirname(__file__))
require_clean_submodules(root_dir, sys.argv)
# Warn if it looks like JS libs need to be built
if 'buildjs' not in sys.argv:
check_js_build_status(VERSION, root_dir)
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
cmdclass={'submodule': UpdateSubmodules,
'buildjs': BuildJavascript},
packages=['mpld3',
'mpld3/mplexporter',
'mpld3/mplexporter/renderers'],
package_data={'mpld3': ['js/*.js']},
install_requires=["jinja2", "matplotlib"],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
)
| bsd-3-clause |
cmantas/tiramola_v3 | lib/test_draw.py | 1 | 1568 | #! /usr/bin/env python
import itertools, sys
from persistance_module import env_vars
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as pl
from collections import deque
############ figure size (in inches @ 80 dpi) #################
width = 12
height = 6
dpi = 80
fig_name=""
def moving_average(iterable, n=3):
# moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
# http://en.wikipedia.org/wiki/Moving_average
it = iter(iterable)
d = deque(itertools.islice(it, n-1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n)
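# my_avg is an exponentially weighted moving average: each value is
# (1 - a) * previous_smoothed + a * current, so a larger a tracks the raw data more closely.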
def my_avg(l, a=0.1):
prev=l[0]
rv = [prev]
for x in l[1:]:
v = (1.0-a)*prev + a*x
rv.append(v)
prev = v
return rv
def my_draw(x, y, x_label, y_label, graph_name, ax2_x=None, ax2_label=None):
fig5 = pl.figure(5, figsize=(width,height), dpi=dpi)
ax1 = fig5.add_subplot(111)
ax1.plot(x, y, 'black')
ax1.set_xlabel(x_label)
ax1.set_ylabel(y_label, color='black')
ax1.set_ylim((10, 100))
ax1.grid(True)
if not ax2_x is None:
ax2 = ax1.twinx()
ax2.plot(x, ax2_x, 'g')
ax2.set_ylabel(ax2_label, color='black')
pl.title(graph_name)
pl.savefig(fig_name + "_"+graph_name)
pl.clf()
pl.cla()
return
def draw_exp(meas_file):
return
if __name__ == '__main__':
if len(sys.argv) == 2:
draw_exp(sys.argv[1])
else:
print 'Usage: python draw_experiment.py measurements_file'
| apache-2.0 |
elijah513/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
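        # take the sign bit of each projection, pack groups of 8 bits into bytes and
        # view them as big-endian uint32 values: one 32-bit hash per MAX_HASH_SIZE projections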
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
    The LSH forest data structure is implemented using sorted
    arrays, binary search and 32-bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
        Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in a GaussianRandomProjectionHash object and can be obtained
        from its ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
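        # left_mask[h] keeps the h most significant bits of a hash;
        # right_mask[h] has ones in the remaining (MAX_HASH_SIZE - h) least significant bits.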
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, the remainder is filled
        # uniformly from the indices that were not selected.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
        This creates binary hashes of the input data points by taking the
        dot product of the input points and the hash functions, then
        converting each projection into a binary hash based
        on the sign (positive/negative) of the projection.
        A sorted array of binary hashes is stored for each tree.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
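        # for every query, compute the longest matching prefix length in each tree and
        # return the maximum across trees as the depth at which the ascending phase starts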
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
            Number of neighbors required. If not provided, this will
            return the number specified at initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
rs2/pandas | pandas/tests/indexing/multiindex/test_ix.py | 3 | 2117 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas import DataFrame, MultiIndex
import pandas._testing as tm
class TestMultiIndex:
def test_frame_setitem_loc(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
frame.loc[("bar", "two"), "B"] = 5
assert frame.loc[("bar", "two"), "B"] == 5
# with integer labels
df = frame.copy()
df.columns = list(range(3))
df.loc[("bar", "two"), 1] = 7
assert df.loc[("bar", "two"), 1] == 7
def test_loc_general(self):
# GH 2817
data = {
"amount": {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
"col": {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
"year": {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012},
}
df = DataFrame(data).set_index(keys=["col", "year"])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
return_value = df.sort_index(inplace=True)
assert return_value is None
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.0] * 3, [2012] * 3], names=["col", "year"])
expected = DataFrame({"amount": [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_loc_multiindex_missing_label_raises(self):
# GH 21593
df = DataFrame(
np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]],
)
with pytest.raises(KeyError, match=r"^2$"):
df.loc[2]
def test_series_loc_getitem_fancy(
self, multiindex_year_month_day_dataframe_random_data
):
s = multiindex_year_month_day_dataframe_random_data["A"]
expected = s.reindex(s.index[49:51])
result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexes/datetimes/test_ops.py | 1 | 20651 | import pytest
import warnings
import numpy as np
from datetime import datetime
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp,
date_range, bdate_range, Index)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay, Day, Hour
from pandas.tests.test_base import Ops
from pandas.core.dtypes.generic import ABCDateOffset
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_minmax_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize('op', ['min', 'max'])
def test_minmax_nat(self, op):
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
@pytest.mark.parametrize('idx',
[
DatetimeIndex(
['2011-01-01',
'2011-01-02',
'2011-01-03'],
freq='D', name='idx'),
DatetimeIndex(
['2011-01-01 09:00',
'2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', name='tzidx', tz='Asia/Tokyo')
])
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize('index_dates,expected_dates', [
(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05']),
(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05']),
([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT],
[pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'])
])
def test_order_without_freq(self, index_dates, expected_dates,
tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name='idx')
expected = DatetimeIndex(expected_dates, tz=tz, name='idx')
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize('freq', [
'A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S'])
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
@pytest.mark.parametrize('values', [
['20180101', '20180103', '20180105'], []])
@pytest.mark.parametrize('freq', [
'2D', Day(2), '2B', BDay(2), '48H', Hour(48)])
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(['20180101', '20180103', '20180105'])
# setting with an incompatible freq
msg = ('Inferred frequency 2D from passed values does not conform to '
'passed frequency 5D')
with tm.assert_raises_regex(ValueError, msg):
idx.freq = '5D'
# setting with non-freq string
with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
idx.freq = 'foo'
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(['20180101', '20180102'])
# getter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename('foo')
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename('foo')
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END, freq='C')
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", pd.errors.PerformanceWarning)
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=CDay())
assert shifted[0] == rng[0] + CDay()
def test_shift_periods(self):
# GH #22458 : argument 'n' was deprecated in favor of 'periods'
idx = pd.DatetimeIndex(start=START, end=END,
periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=True):
tm.assert_index_equal(idx.shift(n=0), idx)
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_equals(self):
assert not self.rng.equals(list(self.rng))
| bsd-3-clause |
ZRiddle/BoostARoota | Dev_Testing/testBAR.py | 2 | 9472 | #testBAR.py
import time
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
from sklearn.model_selection import KFold
from sklearn.metrics import log_loss, roc_auc_score, mean_squared_error, mean_absolute_error
#then import BoostARoota to get the proper functions for evaluation
########################################################################################
#
# Functions for testing output
#
########################################################################################
def PrepLL(y, y_pred):
# takes y, y_hat and returns the log loss
preds = []
for i in y_pred:
preds.append([1 - i, i])
logloss = log_loss(y, preds)
return logloss
def rmse(y, y_pred):
    # returns rmse for the predictions
return mean_squared_error(y, y_pred) ** 0.5
def evalReg(df):
#input df, output regression evaluation - rmse, mae
results = [rmse(df.y_actual, df.y_hat_BR),
rmse(df.y_actual, df.y_hat_boruta),
rmse(df.y_actual, df.y_hat),
mean_absolute_error(df.y_actual, df.y_hat_BR2),
mean_absolute_error(df.y_actual, df.y_hat_boruta2),
mean_absolute_error(df.y_actual, df.y_hat2) ]
return results
def evalClass(df):
#input df, output classification evaluation - logloss, auc
results = [PrepLL(df.y_actual, df.y_hat_BR),
PrepLL(df.y_actual, df.y_hat_boruta),
PrepLL(df.y_actual, df.y_hat),
roc_auc_score(df.y_actual, df.y_hat_BR2),
roc_auc_score(df.y_actual, df.y_hat_boruta2),
roc_auc_score(df.y_actual, df.y_hat2) ]
return results
def evalResults(df, eval):
if eval == 'reg':
results = evalReg(df)
else:
results = evalClass(df)
return results
def evalBARBAR(df, eval):
if eval == 'reg':
results = [rmse(df.y_actual, df.y_hat_BR),
rmse(df.y_actual, df.y_hat2_BR),
rmse(df.y_actual, df.y_hat),
mean_absolute_error(df.y_actual, df.y_hat_BR),
mean_absolute_error(df.y_actual, df.y_hat2_BR),
mean_absolute_error(df.y_actual, df.y_hat)]
else:
results = [PrepLL(df.y_actual, df.y_hat_BR),
PrepLL(df.y_actual, df.y_hat2_BR),
PrepLL(df.y_actual, df.y_hat),
roc_auc_score(df.y_actual, df.y_hat_BR),
roc_auc_score(df.y_actual, df.y_hat2_BR),
roc_auc_score(df.y_actual, df.y_hat)]
return results
########################################################################################
#
# Testing BAR against itself
#
########################################################################################
#just run through getting the predictions for each of the folds all features
#Only test on a single metric
#Compare the results from each iteration
def testBARvSelf(X, Y, eval, folds=5):
if eval == "reg":
eval_metric = "rmse"
else:
eval_metric = "logloss"
np.random.seed(None) #removing any seed to ensure that the folds are created differently
#initialize empty lists - not the most efficient, but it works
bar_times = []
bar2_times = []
y_hat = []
y_hat_BR = []
y_hat2_BR = []
y_actual = []
fold = []
#Start the cross validation
kf = KFold(n_splits=folds)
i = 1
for train, test in kf.split(X):
X_train, X_test, y_train, y_test = X.iloc[train], X.iloc[test], Y[train], Y[test]
#Get predictions on all features
y_pred = TrainGetPreds(X_train, y_train, X_test, metric=eval_metric)
#BAR1
tmp = time.time()
BR_vars = BoostARoota(X_train, y_train, metric=eval_metric)
bar_times.append(time.time() - tmp)
BR_X = X_train[BR_vars]
BR_test = X_test[BR_vars]
BR_preds = TrainGetPreds(BR_X, y_train, BR_test, metric=eval_metric)
#BAR2
tmp = time.time()
BR_vars = BoostARoota2(X_train, y_train, metric=eval_metric)
bar2_times.append(time.time() - tmp)
BR_X = X_train[BR_vars]
BR_test = X_test[BR_vars]
BR2_preds = TrainGetPreds(BR_X, y_train, BR_test, metric=eval_metric)
# evaluate predictions and append to lists
y_hat.extend(y_pred)
y_hat_BR.extend(BR_preds)
y_hat2_BR.extend(BR2_preds)
y_actual.extend(y_test)
#Set the fold it is trained on
fold.extend([i] * len(y_pred))
i+=1
values = [np.mean(bar_times), np.mean(bar2_times)]
#Start building the array to be passed out; first is the timings, then the eval results
#Build the dataframe to pass into the evaluation functions
results = pd.DataFrame({"y_hat": y_hat,
"Fold": fold,
"y_hat_BR": y_hat_BR,
"y_hat2_BR": y_hat2_BR,
"y_actual": y_actual})
values.extend(evalBARBAR(results, eval=eval))
return pd.DataFrame(values, ["BarTime1", "BarTime2",
'BAR1_Metric1', 'BAR2_Metric1', 'AllMetric1',
'BAR1_Metric2', 'BAR2_Metric2', 'AllMetric2'])
########################################################################################
#
# Functions for rigorous testing of the approaches
#
########################################################################################
#just run through getting the predictions for each of the folds all features
def trainKFolds(X, Y, eval, folds=5):
if eval == "reg":
eval_metric = "rmse"
eval_metric2 = "mae"
else:
eval_metric = "logloss"
eval_metric2 = "auc"
np.random.seed(None) #removing any seed to ensure that the folds are created differently
#initialize empty lists - not the most efficient, but it works
bar_times = []
boruta_times = []
y_hat = []
y_hat2 = []
y_hat_BR = []
y_hat_BR2 = []
y_hat_boruta = []
y_hat_boruta2 = []
y_actual = []
fold = []
#Start the cross validation
kf = KFold(n_splits=folds)
i = 1
for train, test in kf.split(X):
X_train, X_test, y_train, y_test = X.iloc[train], X.iloc[test], Y[train], Y[test]
#Get predictions on all features
y_pred = TrainGetPreds(X_train, y_train, X_test, metric=eval_metric)
y_pred2 = TrainGetPreds(X_train, y_train, X_test, metric=eval_metric2)
#BoostARoota - tune to metric 1
tmp = time.time()
BR_vars = BoostARoota2(X_train, y_train, metric=eval_metric)
bar_times.append(time.time() - tmp)
BR_X = X_train[BR_vars]
BR_test = X_test[BR_vars]
BR_preds = TrainGetPreds(BR_X, y_train, BR_test, metric=eval_metric)
#BoostARoota - tune to metric 2
tmp = time.time()
BR_vars = BoostARoota2(X_train, y_train, metric=eval_metric2)
bar_times.append(time.time() - tmp)
BR_X = X_train[BR_vars]
BR_test = X_test[BR_vars]
BR_preds2 = TrainGetPreds(BR_X, y_train, BR_test, metric=eval_metric2)
# #Boruta - get predictions
tmp = time.time()
rf = RandomForestClassifier(n_jobs=-1, class_weight='auto', max_depth=5)
feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=1)
feat_selector.fit(X_train.values, y_train.values)
boruta_times.append(time.time() - tmp)
X_train_filter = feat_selector.transform(X_train.values)
X_test_filter = feat_selector.transform(X_test.values)
Boruta_preds = TrainGetPreds(X_train_filter, y_train, X_test_filter, metric=eval_metric)
Boruta_preds2 = TrainGetPreds(X_train_filter, y_train, X_test_filter, metric=eval_metric2)
# evaluate predictions and append to lists
y_hat.extend(y_pred)
y_hat2.extend(y_pred2)
y_hat_BR.extend(BR_preds)
y_hat_BR2.extend(BR_preds2)
y_hat_boruta.extend(Boruta_preds)
y_hat_boruta2.extend(Boruta_preds2)
y_actual.extend(y_test)
#Set the fold it is trained on
fold.extend([i] * len(y_pred))
i+=1
#Start building the array to be passed out; first is the timings, then the eval results
values = [np.mean(boruta_times), np.mean(bar_times)]
#Build the dataframe to pass into the evaluation functions
results = pd.DataFrame({"y_hat": y_hat,
"y_hat2": y_hat2,
"Fold": fold,
"y_hat_BR": y_hat_BR,
"y_hat_BR2": y_hat_BR2,
"y_hat_boruta": y_hat_boruta,
"y_hat_boruta2": y_hat_boruta2,
"y_actual": y_actual})
#then append the evaluation results to values
values.extend(evalResults(results, eval=eval))
return values
def repCV(X, Y, eval, repeats=3):
#runs trainKFolds() for however many repeats specified here
#Returns the results
names = ['BorutaTime', 'BarTime',
'BarMetric1', 'BorutaMetric1', 'AllMetric1',
'BarMetric2', 'BorutaMetric2', 'AllMetric2']
for i in range(repeats):
        if i == 0:
            df = pd.DataFrame(trainKFolds(X, Y, eval)).T
        else:
            df = df.append(pd.DataFrame(trainKFolds(X, Y, eval)).T, ignore_index=True)
df.columns = names
return df
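# Example usage of the evaluation harness above -- a minimal sketch, kept as
# comments because it assumes the BoostARoota helpers referenced at the top of
# this file (BoostARoota, BoostARoota2, TrainGetPreds) have been imported, and
# the file/column names below are hypothetical placeholders:
# if __name__ == "__main__":
#     data = pd.read_csv("train.csv")       # hypothetical input file
#     Y = data.pop("target").values         # hypothetical label column
#     X = data
#     class_results = repCV(X, Y, eval="class", repeats=3)
#     print(class_results.mean())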
| mit |
costypetrisor/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/tokenize/texttiling.py | 7 | 16850 | # Natural Language Toolkit: TextTiling
#
# Copyright (C) 2001-2016 NLTK Project
# Author: George Boutsioukis
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import re
import math
try:
import numpy
except ImportError:
pass
from nltk.tokenize.api import TokenizerI
BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1
LC, HC = 0, 1
DEFAULT_SMOOTHING = [0]
class TextTilingTokenizer(TokenizerI):
"""Tokenize a document into topical sections using the TextTiling algorithm.
This algorithm detects subtopic shifts based on the analysis of lexical
co-occurrence patterns.
The process starts by tokenizing the text into pseudosentences of
a fixed size w. Then, depending on the method used, similarity
scores are assigned at sentence gaps. The algorithm proceeds by
detecting the peak differences between these scores and marking
them as boundaries. The boundaries are normalized to the closest
paragraph break and the segmented text is returned.
:param w: Pseudosentence size
:type w: int
:param k: Size (in sentences) of the block used in the block comparison method
:type k: int
:param similarity_method: The method used for determining similarity scores:
`BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
:type similarity_method: constant
:param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
:type stopwords: list(str)
:param smoothing_method: The method used for smoothing the score plot:
`DEFAULT_SMOOTHING` (default)
:type smoothing_method: constant
:param smoothing_width: The width of the window used by the smoothing method
:type smoothing_width: int
:param smoothing_rounds: The number of smoothing passes
:type smoothing_rounds: int
:param cutoff_policy: The policy used to determine the number of boundaries:
`HC` (default) or `LC`
:type cutoff_policy: constant
>>> from nltk.corpus import brown
>>> tt = TextTilingTokenizer(demo_mode=True)
>>> text = brown.raw()[:10000]
>>> s, ss, d, b = tt.tokenize(text)
>>> b
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
"""
def __init__(self,
w=20,
k=10,
similarity_method=BLOCK_COMPARISON,
stopwords=None,
smoothing_method=DEFAULT_SMOOTHING,
smoothing_width=2,
smoothing_rounds=1,
cutoff_policy=HC,
demo_mode=False):
if stopwords is None:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
self.__dict__.update(locals())
del self.__dict__['self']
def tokenize(self, text):
"""Return a tokenized copy of *text*, where each "token" represents
a separate topic."""
lowercase_text = text.lower()
paragraph_breaks = self._mark_paragraph_breaks(text)
text_length = len(lowercase_text)
# Tokenization step starts here
# Remove punctuation
nopunct_text = ''.join(c for c in lowercase_text
if re.match("[a-z\-\' \n\t]", c))
nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
tokseqs = self._divide_to_tokensequences(nopunct_text)
# The morphological stemming step mentioned in the TextTile
# paper is not implemented. A comment in the original C
# implementation states that it offers no benefit to the
# process. It might be interesting to test the existing
# stemmers though.
#words = _stem_words(words)
# Filter stopwords
for ts in tokseqs:
ts.wrdindex_list = [wi for wi in ts.wrdindex_list
if wi[0] not in self.stopwords]
token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
# End of the Tokenization step
# Lexical score determination
if self.similarity_method == BLOCK_COMPARISON:
gap_scores = self._block_comparison(tokseqs, token_table)
elif self.similarity_method == VOCABULARY_INTRODUCTION:
raise NotImplementedError("Vocabulary introduction not implemented")
if self.smoothing_method == DEFAULT_SMOOTHING:
smooth_scores = self._smooth_scores(gap_scores)
# End of Lexical score Determination
# Boundary identification
depth_scores = self._depth_scores(smooth_scores)
segment_boundaries = self._identify_boundaries(depth_scores)
normalized_boundaries = self._normalize_boundaries(text,
segment_boundaries,
paragraph_breaks)
# End of Boundary Identification
segmented_text = []
prevb = 0
for b in normalized_boundaries:
if b == 0:
continue
segmented_text.append(text[prevb:b])
prevb = b
if prevb < text_length: # append any text that may be remaining
segmented_text.append(text[prevb:])
if not segmented_text:
segmented_text = [text]
if self.demo_mode:
return gap_scores, smooth_scores, depth_scores, segment_boundaries
return segmented_text
def _block_comparison(self, tokseqs, token_table):
"Implements the block comparison method"
def blk_frq(tok, block):
ts_occs = filter(lambda o: o[0] in block,
token_table[tok].ts_occurences)
freq = sum([tsocc[1] for tsocc in ts_occs])
return freq
gap_scores = []
numgaps = len(tokseqs)-1
for curr_gap in range(numgaps):
score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
score = 0.0
#adjust window size for boundary conditions
if curr_gap < self.k-1:
window_size = curr_gap + 1
elif curr_gap > numgaps-self.k:
window_size = numgaps - curr_gap
else:
window_size = self.k
b1 = [ts.index
for ts in tokseqs[curr_gap-window_size+1 : curr_gap+1]]
b2 = [ts.index
for ts in tokseqs[curr_gap+1 : curr_gap+window_size+1]]
for t in token_table:
score_dividend += blk_frq(t, b1)*blk_frq(t, b2)
score_divisor_b1 += blk_frq(t, b1)**2
score_divisor_b2 += blk_frq(t, b2)**2
try:
score = score_dividend/math.sqrt(score_divisor_b1*
score_divisor_b2)
except ZeroDivisionError:
pass # score += 0.0
gap_scores.append(score)
return gap_scores
def _smooth_scores(self, gap_scores):
"Wraps the smooth function from the SciPy Cookbook"
return list(smooth(numpy.array(gap_scores[:]),
window_len = self.smoothing_width+1))
def _mark_paragraph_breaks(self, text):
"""Identifies indented text or line breaks as the beginning of
paragraphs"""
MIN_PARAGRAPH = 100
pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
matches = pattern.finditer(text)
last_break = 0
pbreaks = [0]
for pb in matches:
if pb.start()-last_break < MIN_PARAGRAPH:
continue
else:
pbreaks.append(pb.start())
last_break = pb.start()
return pbreaks
def _divide_to_tokensequences(self, text):
"Divides the text into pseudosentences of fixed size"
w = self.w
wrdindex_list = []
matches = re.finditer("\w+", text)
for match in matches:
wrdindex_list.append((match.group(), match.start()))
return [TokenSequence(i/w, wrdindex_list[i:i+w])
for i in range(0, len(wrdindex_list), w)]
def _create_token_table(self, token_sequences, par_breaks):
"Creates a table of TokenTableFields"
token_table = {}
current_par = 0
current_tok_seq = 0
pb_iter = par_breaks.__iter__()
current_par_break = next(pb_iter)
if current_par_break == 0:
try:
current_par_break = next(pb_iter) #skip break at 0
except StopIteration:
raise ValueError(
"No paragraph breaks were found(text too short perhaps?)"
)
for ts in token_sequences:
for word, index in ts.wrdindex_list:
try:
while index > current_par_break:
current_par_break = next(pb_iter)
current_par += 1
except StopIteration:
#hit bottom
pass
if word in token_table:
token_table[word].total_count += 1
if token_table[word].last_par != current_par:
token_table[word].last_par = current_par
token_table[word].par_count += 1
if token_table[word].last_tok_seq != current_tok_seq:
token_table[word].last_tok_seq = current_tok_seq
token_table[word]\
.ts_occurences.append([current_tok_seq,1])
else:
token_table[word].ts_occurences[-1][1] += 1
else: #new word
token_table[word] = TokenTableField(first_pos=index,
ts_occurences= \
[[current_tok_seq,1]],
total_count=1,
par_count=1,
last_par=current_par,
last_tok_seq= \
current_tok_seq)
current_tok_seq += 1
return token_table
def _identify_boundaries(self, depth_scores):
"""Identifies boundaries at the peaks of similarity score
differences"""
boundaries = [0 for x in depth_scores]
avg = sum(depth_scores)/len(depth_scores)
stdev = numpy.std(depth_scores)
#SB: what is the purpose of this conditional?
if self.cutoff_policy == LC:
cutoff = avg-stdev/2.0
else:
cutoff = avg-stdev/2.0
depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
depth_tuples.reverse()
hp = list(filter(lambda x:x[0]>cutoff, depth_tuples))
for dt in hp:
boundaries[dt[1]] = 1
for dt2 in hp: #undo if there is a boundary close already
if dt[1] != dt2[1] and abs(dt2[1]-dt[1]) < 4 \
and boundaries[dt2[1]] == 1:
boundaries[dt[1]] = 0
return boundaries
def _depth_scores(self, scores):
"""Calculates the depth of each gap, i.e. the average difference
between the left and right peaks and the gap's score"""
depth_scores = [0 for x in scores]
#clip boundaries: this holds on the rule of thumb(my thumb)
#that a section shouldn't be smaller than at least 2
#pseudosentences for small texts and around 5 for larger ones.
        clip = min(max(len(scores) // 10, 2), 5)
index = clip
for gapscore in scores[clip:-clip]:
lpeak = gapscore
for score in scores[index::-1]:
if score >= lpeak:
lpeak = score
else:
break
rpeak = gapscore
for score in scores[index:]:
if score >= rpeak:
rpeak = score
else:
break
depth_scores[index] = lpeak + rpeak - 2 * gapscore
index += 1
return depth_scores
def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
"""Normalize the boundaries identified to the original text's
paragraph breaks"""
norm_boundaries = []
char_count, word_count, gaps_seen = 0, 0, 0
seen_word = False
for char in text:
char_count += 1
if char in " \t\n" and seen_word:
seen_word = False
word_count += 1
if char not in " \t\n" and not seen_word:
seen_word=True
if gaps_seen < len(boundaries) and word_count > \
(max(gaps_seen*self.w, self.w)):
if boundaries[gaps_seen] == 1:
#find closest paragraph break
best_fit = len(text)
for br in paragraph_breaks:
if best_fit > abs(br-char_count):
best_fit = abs(br-char_count)
bestbr = br
else:
break
if bestbr not in norm_boundaries: #avoid duplicates
norm_boundaries.append(bestbr)
gaps_seen += 1
return norm_boundaries
class TokenTableField(object):
"""A field in the token table holding parameters for each token,
used later in the process"""
def __init__(self,
first_pos,
ts_occurences,
total_count=1,
par_count=1,
last_par=0,
last_tok_seq=None):
self.__dict__.update(locals())
del self.__dict__['self']
class TokenSequence(object):
"A token list with its original length and its index"
def __init__(self,
index,
wrdindex_list,
original_length=None):
original_length=original_length or len(wrdindex_list)
self.__dict__.update(locals())
del self.__dict__['self']
#Pasted from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='flat'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the beginning and end part of the output signal.
:param x: the input signal
:param window_len: the dimension of the smoothing window; should be an odd integer
:param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
:return: the smoothed signal
example::
        t = numpy.arange(-2, 2, 0.1)
        x = numpy.sin(t) + numpy.random.randn(len(t)) * 0.1
        y = smooth(x)
:see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w = numpy.ones(window_len,'d')
else:
w = eval('numpy.' + window + '(window_len)')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
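# Typical (non-demo) usage -- a sketch, kept as comments; it assumes the brown
# corpus import from demo() and simply splits the raw text into topical
# segments:
# tt = TextTilingTokenizer()
# segments = tt.tokenize(brown.raw()[:10000])
# print(len(segments))  # number of topical sections found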
| gpl-3.0 |
bgyori/indra | indra/sources/dgi/api.py | 3 | 2882 | # -*- coding: utf-8 -*-
"""API for `Drug Gene Interaction DB <http://www.dgidb.org>`_."""
import logging
from typing import Optional, Set, Tuple
import pandas as pd
from .processor import DGIProcessor
logger = logging.getLogger(__name__)
USECOLS = [
"gene_name",
"entrez_id",
"interaction_claim_source",
"interaction_types",
"drug_name",
"drug_concept_id",
"PMIDs",
]
def process_version(
version: Optional[str] = None,
skip_databases: Optional[Set[str]] = None,
) -> DGIProcessor:
"""Get a processor that extracted INDRA Statements from DGI content.
Parameters
----------
version : Optional[str]
The optional version of DGI to use. If not given, the version is
automatically looked up.
skip_databases : Optional[set[str]]
A set of primary database sources to skip. If not given, DrugBank
is skipped since there is a dedicated module in INDRA for obtaining
DrugBank statements.
Returns
-------
dp : DGIProcessor
A DGI processor with pre-extracted INDRA statements
"""
version, df = get_version_df(version)
return process_df(df=df, version=version, skip_databases=skip_databases)
def process_df(
df: pd.DataFrame,
version: Optional[str] = None,
skip_databases: Optional[Set[str]] = None,
) -> DGIProcessor:
"""Get a processor that extracted INDRA Statements from DGI content based
on the given dataframe.
Parameters
----------
df : pd.DataFrame
A pandas DataFrame for the DGI interactions file.
version : Optional[str]
The optional version of DGI to use. If not given, statements will
not be annotated with a version number.
skip_databases : Optional[set[str]]
A set of primary database sources to skip. If not given, DrugBank
is skipped since there is a dedicated module in INDRA for obtaining
DrugBank statements.
Returns
-------
dp : DGIProcessor
A DGI processor with pre-extracted INDRA statements
"""
dp = DGIProcessor(df=df, version=version, skip_databases=skip_databases)
dp.extract_statements()
return dp
def get_version_df(version: Optional[str] = None) -> Tuple[str, pd.DataFrame]:
"""Get the latest version of the DGI interaction dataframe."""
if version is None:
try:
import bioversions
except ImportError:
version = None
else:
version = bioversions.get_version("Drug Gene Interaction Database")
if version is None:
version = "2021-Jan"
logger.warning(f"Could not find version with bioregistry, using"
f"version {version}.")
url = f"https://www.dgidb.org/data/monthly_tsvs/{version}/interactions.tsv"
df = pd.read_csv(url, usecols=USECOLS, sep="\t", dtype=str)
return version, df
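# Example usage -- a sketch, kept as comments; it assumes network access to
# dgidb.org and that, as with other INDRA processors, the extracted statements
# are exposed on the processor's ``statements`` attribute:
# dp = process_version()
# print(len(dp.statements))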
| bsd-2-clause |
GaZ3ll3/scikit-image | doc/examples/plot_tinting_grayscale_images.py | 14 | 5336 | """
=========================
Tinting gray-scale images
=========================
It can be useful to artificially tint an image with some color, either to
highlight particular regions of an image or maybe just to liven up a grayscale
image. This example demonstrates image-tinting by scaling RGB values and by
adjusting colors in the HSV color-space.
In 2D, color images are often represented in RGB---3 layers of 2D arrays, where
the 3 layers represent (R)ed, (G)reen and (B)lue channels of the image. The
simplest way of getting a tinted image is to set each RGB channel to the
grayscale image scaled by a different multiplier for each channel. For example,
multiplying the green and blue channels by 0 leaves only the red channel and
produces a bright red image. Similarly, zeroing-out the blue channel leaves
only the red and green channels, which combine to form yellow.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage import img_as_float
grayscale_image = img_as_float(data.camera()[::2, ::2])
image = color.gray2rgb(grayscale_image)
red_multiplier = [1, 0, 0]
yellow_multiplier = [1, 1, 0]
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(red_multiplier * image)
ax2.imshow(yellow_multiplier * image)
"""
.. image:: PLOT2RST.current_figure
In many cases, dealing with RGB values may not be ideal. Because of that, there
are many other `color spaces`_ in which you can represent a color image. One
popular color space is called HSV, which represents hue (~the color),
saturation (~colorfulness), and value (~brightness). For example, a color
(hue) might be green, but its saturation is how intense that green is---where
olive is on the low end and neon on the high end.
In some implementations, the hue in HSV goes from 0 to 360, since hues wrap
around in a circle. In scikit-image, however, hues are float values from 0 to
1, so that hue, saturation, and value all share the same scale.
.. _color spaces:
http://en.wikipedia.org/wiki/List_of_color_spaces_and_their_uses
Below, we plot a linear gradient in the hue, with the saturation and value
turned all the way up:
"""
import numpy as np
hue_gradient = np.linspace(0, 1)
hsv = np.ones(shape=(1, len(hue_gradient), 3), dtype=float)
hsv[:, :, 0] = hue_gradient
all_hues = color.hsv2rgb(hsv)
fig, ax = plt.subplots(figsize=(5, 2))
# Set image extent so hues go from 0 to 1 and the image is a nice aspect ratio.
ax.imshow(all_hues, extent=(0, 1, 0, 0.2))
ax.set_axis_off()
"""
.. image:: PLOT2RST.current_figure
Notice how the colors at the far left and far right are the same. That reflects
the fact that the hues wrap around like the color wheel (see HSV_ for more
info).
.. _HSV: http://en.wikipedia.org/wiki/HSL_and_HSV
Now, let's create a little utility function to take an RGB image and:
1. Transform the RGB image to HSV
2. Set the hue and saturation
3. Transform the HSV image back to RGB
"""
def colorize(image, hue, saturation=1):
""" Add color of the given hue to an RGB image.
By default, set the saturation to 1 so that the colors pop!
"""
hsv = color.rgb2hsv(image)
hsv[:, :, 1] = saturation
hsv[:, :, 0] = hue
return color.hsv2rgb(hsv)
"""
Notice that we need to bump up the saturation; images with zero saturation are
grayscale, so we need a non-zero value to actually see the color we've set.
Using the function above, we plot six images with a linear gradient in the hue
and a non-zero saturation:
"""
hue_rotations = np.linspace(0, 1, 6)
fig, axes = plt.subplots(nrows=2, ncols=3)
for ax, hue in zip(axes.flat, hue_rotations):
# Turn down the saturation to give it that vintage look.
tinted_image = colorize(image, hue, saturation=0.3)
ax.imshow(tinted_image, vmin=0, vmax=1)
ax.set_axis_off()
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
You can combine this tinting effect with numpy slicing and fancy-indexing to
selectively tint your images. In the example below, we set the hue of some
rectangles using slicing and scale the RGB values of some pixels found by
thresholding. In practice, you might want to define a region for tinting based
on segmentation results or blob detection methods.
"""
from skimage.filters import rank
# Square regions defined as slices over the first two dimensions.
top_left = (slice(100),) * 2
bottom_right = (slice(-100, None),) * 2
sliced_image = image.copy()
sliced_image[top_left] = colorize(image[top_left], 0.82, saturation=0.5)
sliced_image[bottom_right] = colorize(image[bottom_right], 0.5, saturation=0.5)
# Create a mask selecting regions with interesting texture.
noisy = rank.entropy(grayscale_image, np.ones((9, 9)))
textured_regions = noisy > 4
# Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
# expects an RGB image (height x width x channel), but fancy-indexing returns
# a set of RGB pixels (# pixels x channel).
masked_image = image.copy()
masked_image[textured_regions, :] *= red_multiplier
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(sliced_image)
ax2.imshow(masked_image)
plt.show()
"""
.. image:: PLOT2RST.current_figure
For coloring multiple regions, you may also be interested in
`skimage.color.label2rgb <http://scikit-image.org/docs/0.9.x/api/skimage.color.html#label2rgb>`_.
"""
| bsd-3-clause |
pnedunuri/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
ToFuProject/tofu | benchmarks/vmeshing_benchmark.py | 2 | 10811 | # External modules
import os
import timeit
import numpy as np
import matplotlib
import tofu.geom as tfg
import tofu.geom._GG as GG
import time
matplotlib.use("agg")
# Nose-specific
VerbHead = "tofu.geom.tests03_core"
keyVers = "Vers"
_Exp = "WEST"
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
def bigger_test(here, resolution=0.02):
""" exactly like test13_get_sampleV(self) """
path = os.path.join(here, "tests03_core_data")
lf = os.listdir(path)
lf = [f for f in lf if all([s in f for s in [_Exp, ".txt"]])]
lCls = sorted(set([f.split("_")[1] for f in lf]))
dobj = {"Tor": {}} # , "Lin": {}}
for tt in dobj.keys():
for cc in lCls:
lfc = [f for f in lf if f.split("_")[1] == cc and "V0" in f]
ln = []
for f in lfc:
if "CoilCS" in f:
ln.append(f.split("_")[2].split(".")[0])
else:
ln.append(f.split("_")[2].split(".")[0])
lnu = sorted(set(ln))
if not len(lnu) == len(ln):
msg = "Non-unique name list for {0}:".format(cc)
msg += "\n ln = [{0}]".format(", ".join(ln))
msg += "\n lnu = [{0}]".format(", ".join(lnu))
raise Exception(msg)
dobj[tt][cc] = {}
for ii in range(0, len(ln)):
if "BumperOuter" in ln[ii]:
Lim = np.r_[10.0, 20.0] * np.pi / 180.0
elif "BumperInner" in ln[ii]:
t0 = np.arange(0, 360, 60) * np.pi / 180.0
Dt = 5.0 * np.pi / 180.0
Lim = (
t0[np.newaxis, :] + Dt * np.r_[-1.0,
1.0][:, np.newaxis]
) # noqa
elif "Ripple" in ln[ii]:
t0 = np.arange(0, 360, 30) * np.pi / 180.0
Dt = 2.5 * np.pi / 180.0
Lim = (
t0[np.newaxis, :] + Dt * np.r_[-1.0,
1.0][:, np.newaxis]
) # noqa
elif "IC" in ln[ii]:
t0 = np.arange(0, 360, 120) * np.pi / 180.0
Dt = 10.0 * np.pi / 180.0
Lim = (
t0[np.newaxis, :] + Dt * np.r_[-1.0,
1.0][:, np.newaxis]
) # noqa
elif "LH" in ln[ii]:
t0 = np.arange(-180, 180, 120) * np.pi / 180.0
Dt = 10.0 * np.pi / 180.0
Lim = (
t0[np.newaxis, :] + Dt * np.r_[-1.0,
1.0][:, np.newaxis]
) # noqa
elif tt == "Lin":
Lim = np.r_[0.0, 10.0]
else:
Lim = None
Poly = np.loadtxt(os.path.join(path, lfc[ii]))
assert Poly.ndim == 2
assert Poly.size >= 2 * 3
kwd = dict( # noqa
Name=ln[ii] + tt,
Exp=_Exp,
SavePath=here,
Poly=Poly,
Lim=Lim,
Type=tt,
) # noqa
dobj[tt][cc][ln[ii]] = eval("tfg.%s(**kwd)" % cc)
for typ in dobj.keys():
# Todo : introduce possibility of choosing In coordinates !
for c in dobj[typ].keys():
if issubclass(eval('tfg.%s' % c), tfg._core.StructOut):
continue
for n in dobj[typ][c].keys():
print("\n For type = " + str(typ) + " c = " + str(c)
+ " n = ", n)
obj = dobj[typ][c][n]
print("obj = ", obj)
box = None # [[2.,3.], [0.,5.], [0.,np.pi/2.]]
try:
ii = 0
reso = resolution
start = time.perf_counter()
out = obj.get_sampleV(reso, resMode='abs', DV=box,
Out='(X,Y,Z)')
print("NEW sample V total time = ",
time.perf_counter() - start)
pts0, ind = out[0], out[2]
start = time.perf_counter()
out = obj.get_sampleV(reso, resMode='abs', DV=box,
Out='(X,Y,Z)', algo="old")
print("OLD sample V total time = ",
time.perf_counter() - start)
pts1, ind1 = out[0], out[2]
assert np.allclose(ind1, ind)
ii = 1
start = time.perf_counter()
out = obj.get_sampleV(reso, resMode='abs', ind=ind,
Out='(X,Y,Z)', num_threads=48)
print("NEW sample V total time = ",
time.perf_counter() - start)
pts4 = out[0]
start = time.perf_counter()
out = obj.get_sampleV(reso, resMode='abs', ind=ind,
Out='(X,Y,Z)', algo="old")
print("OLD sample V total time = ",
time.perf_counter() - start)
pts3 = out[0]
except Exception as err:
msg = str(err)
msg += "\nFailed for {0}_{1}_{2}".format(typ, c, n)
msg += "\n ii={0}".format(ii)
msg += "\n Lim={0}".format(str(obj.Lim))
msg += "\n DS={0}".format(str(box))
raise Exception(msg)
if type(pts0) is list:
# assert all([np.allclose(pts0[ii], pts1[ii])
# for ii in range(0, len(pts0))])
assert all([np.allclose(pts3[ii], pts4[ii])
for ii in range(0, len(pts3))])
assert all([np.allclose(pts0[ii], pts4[ii])
for ii in range(0, len(pts0))])
else:
# assert np.allclose(pts0, pts1)
assert np.allclose(pts3, pts4)
assert np.allclose(pts0, pts4)
def small_test():
"""Test vmesh"""
# VPoly
thet = np.linspace(0.0, 2.0 * np.pi, 100)
VPoly = np.array([2.0 + 1.0 * np.cos(thet), 0.0 + 1.0 * np.sin(thet)])
RMinMax = np.array([np.min(VPoly[0, :]), np.max(VPoly[0, :])])
ZMinMax = np.array([np.min(VPoly[1, :]), np.max(VPoly[1, :])])
dR, dZ, dRPhi = 0.025, 0.025, 0.025
LDPhi = [
None, # noqa
[3.0 * np.pi / 4.0, 5.0 * np.pi / 4.0], # noqa
[-np.pi / 4.0, np.pi / 4.0],
] # noqa
for ii in range(0, len(LDPhi)):
Pts, dV, ind, dRr, dZr, dRPhir = GG._Ves_Vmesh_Tor_SubFromD_cython(
dR,
dZ,
dRPhi,
RMinMax,
ZMinMax,
DR=np.array([0.5, 2.0]),
DZ=np.array([0.0, 1.2]),
DPhi=LDPhi[ii],
VPoly=VPoly,
Out="(R,Z,Phi)",
margin=1.0e-9,
)
assert Pts.ndim == 2 and Pts.shape[0] == 3
assert np.all(Pts[0, :] >= 1.0)
assert np.all(Pts[0, :] <= 2.0)
assert np.all(Pts[1, :] >= 0.0)
assert np.all(Pts[1, :] <= 1.0)
marg = np.abs(np.arctan(np.mean(dRPhir) / np.min(VPoly[1, :])))
if not LDPhi[ii] is None:
LDPhi[ii][0] = np.arctan2(
np.sin(LDPhi[ii][0]), np.cos(LDPhi[ii][0])
) # noqa
LDPhi[ii][1] = np.arctan2(
np.sin(LDPhi[ii][1]), np.cos(LDPhi[ii][1])
) # noqa
if LDPhi[ii][0] <= LDPhi[ii][1]:
assert np.all(Pts[2, :] >= LDPhi[ii][0] - marg)
assert np.all(Pts[2, :] <= LDPhi[ii][1] + marg)
else:
assert np.all(
(Pts[2, :] >= LDPhi[ii][0] - marg) # noqa
| (Pts[2, :] <= LDPhi[ii][1] + marg) # noqa
)
assert dV.shape == (Pts.shape[1],)
assert ind.shape == (Pts.shape[1],)
assert ind.dtype == int
assert np.unique(ind).size == ind.size
assert np.all(ind == np.unique(ind))
assert np.all(ind >= 0)
assert all(
[
ind.shape == (Pts.shape[1],),
ind.dtype == int,
np.unique(ind).size == ind.size,
np.all(ind == np.unique(ind)),
np.all(ind >= 0),
]
)
assert dRPhir.ndim == 1
Ptsi, dVi, dRri, dZri, dRPhiri = GG._Ves_Vmesh_Tor_SubFromInd_cython(
dR, dZ, dRPhi, RMinMax, ZMinMax, ind, Out="(R,Z,Phi)",
margin=1.0e-9
)
assert np.allclose(Pts, Ptsi)
assert np.allclose(dV, dVi)
assert dRr == dRri and dZr == dZri
assert np.allclose(dRPhir, dRPhiri)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Testing vmesh algo")
parser.add_argument(
"-m",
"--mode",
help="small, big or timeit",
required=False,
choices=["big", "small"],
default="small",
)
parser.add_argument('--timeit', dest='timeit', action='store_true')
parser.add_argument('--no-timeit', dest='timeit', action='store_false')
parser.add_argument('--path', type=dir_path)
parser.add_argument('--reso', type=float)
parser.set_defaults(timeit=False)
args = parser.parse_args()
if args.path:
here = args.path
else:
here = os.path.abspath(os.path.dirname(__file__))
if args.reso:
resolution = args.reso
else:
resolution = 0.02
print(".-.-.-.-.-.-.-. ", args.mode, " .-.-.-.-.-.-.-.-")
if args.mode.lower() == "small":
if args.timeit:
print(
timeit.timeit(
"small_test()",
setup="from __main__ import small_test",
number=500, # fmt: off
)
)
else:
small_test()
elif args.mode.lower() == "big":
if args.timeit:
print(
timeit.timeit(
"bigger_test()",
setup="from __main__ import bigger_test",
number=50, # fmt: off
)
)
else:
print(".................... ONE CALL ...................")
bigger_test(here, resolution)
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/demo_agg_filter.py | 3 | 9340 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
def smooth1d(x, window_len):
# copied from http://www.scipy.org/Cookbook/SignalSmooth
s=np.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
w = np.hanning(window_len)
y=np.convolve(w/w.sum(),s,mode='same')
return y[window_len-1:-window_len+1]
def smooth2d(A, sigma=3):
window_len = max(int(sigma), 3)*2+1
A1 = np.array([smooth1d(x, window_len) for x in np.asarray(A)])
A2 = np.transpose(A1)
A3 = np.array([smooth1d(x, window_len) for x in A2])
A4 = np.transpose(A3)
return A4
class BaseFilter(object):
def prepare_image(self, src_image, dpi, pad):
ny, nx, depth = src_image.shape
#tgt_image = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src = np.zeros([pad*2+ny, pad*2+nx, depth], dtype="d")
padded_src[pad:-pad, pad:-pad,:] = src_image[:,:,:]
return padded_src#, tgt_image
def get_pad(self, dpi):
return 0
def __call__(self, im, dpi):
pad = self.get_pad(dpi)
padded_src = self.prepare_image(im, dpi, pad)
tgt_image = self.process_image(padded_src, dpi)
return tgt_image, -pad, -pad
class OffsetFilter(BaseFilter):
def __init__(self, offsets=None):
if offsets is None:
self.offsets = (0, 0)
else:
self.offsets = offsets
def get_pad(self, dpi):
return int(max(*self.offsets)/72.*dpi)
def process_image(self, padded_src, dpi):
ox, oy = self.offsets
a1 = np.roll(padded_src, int(ox/72.*dpi), axis=1)
a2 = np.roll(a1, -int(oy/72.*dpi), axis=0)
return a2
class GaussianFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, alpha=0.5, color=None):
self.sigma = sigma
self.alpha = alpha
if color is None:
self.color=(0, 0, 0)
else:
self.color=color
def get_pad(self, dpi):
return int(self.sigma*3/72.*dpi)
def process_image(self, padded_src, dpi):
#offsetx, offsety = int(self.offsets[0]), int(self.offsets[1])
tgt_image = np.zeros_like(padded_src)
aa = smooth2d(padded_src[:,:,-1]*self.alpha,
self.sigma/72.*dpi)
tgt_image[:,:,-1] = aa
tgt_image[:,:,:-1] = self.color
return tgt_image
class DropShadowFilter(BaseFilter):
def __init__(self, sigma, alpha=0.3, color=None, offsets=None):
self.gauss_filter = GaussianFilter(sigma, alpha, color)
self.offset_filter = OffsetFilter(offsets)
def get_pad(self, dpi):
return max(self.gauss_filter.get_pad(dpi),
self.offset_filter.get_pad(dpi))
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
t2 = self.offset_filter.process_image(t1, dpi)
return t2
from matplotlib.colors import LightSource
class LightFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, fraction=0.5):
self.gauss_filter = GaussianFilter(sigma, alpha=1)
self.light_source = LightSource()
self.fraction = fraction
#hsv_min_val=0.5,hsv_max_val=0.9,
# hsv_min_sat=0.1,hsv_max_sat=0.1)
def get_pad(self, dpi):
return self.gauss_filter.get_pad(dpi)
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
elevation = t1[:,:,3]
rgb = padded_src[:,:,:3]
rgb2 = self.light_source.shade_rgb(rgb, elevation,
fraction=self.fraction)
tgt = np.empty_like(padded_src)
tgt[:,:,:3] = rgb2
tgt[:,:,3] = padded_src[:,:,3]
return tgt
class GrowFilter(BaseFilter):
"enlarge the area"
def __init__(self, pixels, color=None):
self.pixels = pixels
if color is None:
self.color=(1, 1, 1)
else:
self.color=color
def __call__(self, im, dpi):
pad = self.pixels
ny, nx, depth = im.shape
new_im = np.empty([pad*2+ny, pad*2+nx, depth], dtype="d")
alpha = new_im[:,:,3]
alpha.fill(0)
alpha[pad:-pad, pad:-pad] = im[:,:,-1]
alpha2 = np.clip(smooth2d(alpha, self.pixels/72.*dpi) * 5, 0, 1)
new_im[:,:,-1] = alpha2
new_im[:,:,:-1] = self.color
offsetx, offsety = -pad, -pad
return new_im, offsetx, offsety
from matplotlib.artist import Artist
class FilteredArtistList(Artist):
"""
A simple container to draw filtered artist.
"""
def __init__(self, artist_list, filter):
self._artist_list = artist_list
self._filter = filter
Artist.__init__(self)
def draw(self, renderer):
renderer.start_rasterizing()
renderer.start_filter()
for a in self._artist_list:
a.draw(renderer)
renderer.stop_filter(self._filter)
renderer.stop_rasterizing()
import matplotlib.transforms as mtransforms
def filtered_text(ax):
# mostly copied from contour_demo.py
# prepare image
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# draw
im = ax.imshow(Z, interpolation='bilinear', origin='lower',
cmap=cm.gray, extent=(-3,3,-2,2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = ax.contour(Z, levels,
origin='lower',
linewidths=2,
extent=(-3,3,-2,2))
ax.set_aspect("auto")
# contour label
cl = ax.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%1.1f',
fontsize=11)
    # change clabel color to black
from matplotlib.patheffects import Normal
for t in cl:
t.set_color("k")
t.set_path_effects([Normal()]) # to force TextPath (i.e., same font in all backends)
# Add white glows to improve visibility of labels.
white_glows = FilteredArtistList(cl, GrowFilter(3))
ax.add_artist(white_glows)
white_glows.set_zorder(cl[0].get_zorder()-0.1)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def drop_shadow_line(ax):
    # copied from examples/misc/svg_filter_line.py
# draw lines
l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-",
mec="b", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "ro-",
mec="r", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
gauss = DropShadowFilter(4)
for l in [l1, l2]:
# draw shadows with same lines with slight offset.
xx = l.get_xdata()
yy = l.get_ydata()
shadow, = ax.plot(xx, yy)
shadow.update_from(l)
# offset transform
ot = mtransforms.offset_copy(l.get_transform(), ax.figure,
x=4.0, y=-6.0, units='points')
shadow.set_transform(ot)
# adjust zorder of the shadow lines so that it is drawn below the
# original lines
shadow.set_zorder(l.get_zorder()-0.5)
shadow.set_agg_filter(gauss)
shadow.set_rasterized(True) # to support mixed-mode renderers
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def drop_shadow_patches(ax):
    # copied from barchart_demo.py
N = 5
menMeans = (20, 35, 30, 35, 27)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
rects1 = ax.bar(ind, menMeans, width, color='r', ec="w", lw=2)
womenMeans = (25, 32, 34, 20, 25)
rects2 = ax.bar(ind+width+0.1, womenMeans, width, color='y', ec="w", lw=2)
#gauss = GaussianFilter(1.5, offsets=(1,1), )
gauss = DropShadowFilter(5, offsets=(1,1), )
shadow = FilteredArtistList(rects1+rects2, gauss)
ax.add_artist(shadow)
shadow.set_zorder(rects1[0].get_zorder()-0.1)
ax.set_xlim(ind[0]-0.5, ind[-1]+1.5)
ax.set_ylim(0, 40)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
def light_filter_pie(ax):
fracs = [15,30,45, 10]
explode=(0, 0.05, 0, 0)
pies = ax.pie(fracs, explode=explode)
ax.patch.set_visible(True)
light_filter = LightFilter(9)
for p in pies[0]:
p.set_agg_filter(light_filter)
p.set_rasterized(True) # to support mixed-mode renderers
p.set(ec="none",
lw=2)
gauss = DropShadowFilter(9, offsets=(3,4), alpha=0.7)
shadow = FilteredArtistList(pies[0], gauss)
ax.add_artist(shadow)
shadow.set_zorder(pies[0][0].get_zorder()-0.1)
if __name__ == "__main__":
plt.figure(1, figsize=(6, 6))
plt.subplots_adjust(left=0.05, right=0.95)
ax = plt.subplot(221)
filtered_text(ax)
ax = plt.subplot(222)
drop_shadow_line(ax)
ax = plt.subplot(223)
drop_shadow_patches(ax)
ax = plt.subplot(224)
ax.set_aspect(1)
light_filter_pie(ax)
ax.set_frame_on(True)
plt.show()
| gpl-2.0 |
EtienneCmb/tensorpac | paper/manuscript/code/fig_4_pac_corrected.py | 1 | 1987 | """Rectify PAC estimation by surrogate distribution."""
import json
with open("../../paper.json", 'r') as f: cfg = json.load(f) # noqa
from tensorpac.signals import pac_signals_tort, pac_signals_wavelet
from tensorpac import Pac
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn-poster')
sns.set_style("white")
plt.rc('font', family=cfg["font"])
###############################################################################
sf = 1024
n_epochs = 50
n_times = 3000
n_perm = 20
###############################################################################
data, time = pac_signals_wavelet(sf=sf, f_pha=10, f_amp=100, noise=3.,
n_epochs=n_epochs, n_times=n_times)
p = Pac(idpac=(1, 2, 3), f_pha='hres', f_amp='hres')
xpac = p.filterfit(sf, data, n_perm=n_perm, p=.05, n_jobs=-1, random_state=0)
pacn = p.pac.mean(-1)
pac = xpac.mean(-1)
surro = p.surrogates.mean(0).max(-1) # mean(perm).max(epochs)
kw_plt = dict(fz_labels=20, fz_title=22, fz_cblabel=20)
plt.figure(figsize=(20, 6))
plt.subplot(1, 3, 1)
p.comodulogram(pacn, cblabel="", title="Uncorrected PAC", cmap=cfg["cmap"],
vmin=0, **kw_plt)
ax = plt.gca()
ax.text(*tuple(cfg["nb_pos"]), 'A', transform=ax.transAxes, **cfg["nb_cfg"])
plt.subplot(1, 3, 2)
p.comodulogram(surro, cblabel="", title="Mean of the surrogate\ndistribution",
cmap=cfg["cmap"], vmin=0, **kw_plt)
plt.ylabel("")
# plt.tick_params(axis='y', which='both', labelleft=False)
ax = plt.gca()
ax.text(*tuple(cfg["nb_pos"]), 'B', transform=ax.transAxes, **cfg["nb_cfg"])
plt.subplot(1, 3, 3)
p.comodulogram(pac, cblabel="", title="Corrected PAC", cmap=cfg["cmap"],
vmin=0, **kw_plt)
plt.ylabel("")
# plt.tick_params(axis='y', which='both', labelleft=False)
ax = plt.gca()
ax.text(*tuple(cfg["nb_pos"]), 'C', transform=ax.transAxes, **cfg["nb_cfg"])
plt.tight_layout()
plt.savefig(f"../figures/Fig4.png", dpi=300, bbox_inches='tight')
p.show() | bsd-3-clause |
passoir/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
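# Typical usage of PlotWindow (a sketch; it assumes a pandas DataFrame ``df``
# and an already running QApplication, neither of which is created here):
#
#   win = PlotWindow()
#   win.plot(df)    # delegates to df.plot(ax=...) on the embedded axes
#   win.show()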
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
PatrickChrist/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
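    # Find the pair of most distant subclusters; they seed the two new
    # subclusters (step 2 of the docstring above).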
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
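    # Assign every existing subcluster to whichever of the two distant seeds
    # is closer, updating the matching new node and new subcluster (steps 3-4).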
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # Things are not so good. We need to redistribute the subclusters
            # in our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
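        # The squared radius of the merged cluster is the mean squared
        # distance of its samples to the new centroid:
        # sum ||x_i - c||^2 / n = SS / n - ||c||^2, computed below from the
        # cached sums as (SS - 2 * n * ||c||^2) / n + ||c||^2.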
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively until the
    sample ends up in the leaf subcluster that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
chutsu/robotics | prototype/tests/vision/test_homography.py | 1 | 1380 | import unittest
import numpy as np
import matplotlib.pylab as plt
from PIL import Image
from scipy import ndimage
from prototype.vision.common import rand3dfeatures
from prototype.vision.homography import affine_transformation
class HomographyTest(unittest.TestCase):
# def test_sandbox(self):
# with Image.open("data/empire/empire.jpg") as img:
# im1 = np.array(img.convert("L"))
#
# H = np.array([[1.4, 0.05, -100],
# [0.05, 1.5, -100],
# [0, 0, 1]])
#
# im2 = ndimage.affine_transform(
# im1,
# H[:2, :2],
# (H[0, 2], H[1, 2])
# )
#
# plt.figure()
# plt.gray()
# plt.imshow(im2)
# plt.show()
def test_condition_points(self):
nb_features = 10
feature_bounds = {
"x": {"min": -1.0, "max": 1.0},
"y": {"min": -1.0, "max": 1.0},
"z": {"min": -1.0, "max": 1.0}
}
features = rand3dfeatures(nb_features, feature_bounds)
fp = np.array(features)
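        # Condition the points before estimation: translate the centroid of
        # the x/y rows to the origin and scale by the largest standard
        # deviation, applied through the 3x3 matrix C1 built below.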
m = np.mean(fp[:2], axis=1)
maxstd = max(np.std(fp[:2], axis=1)) + 1e-9
C1 = np.diag([1 / maxstd, 1 / maxstd, 1])
C1[0][2] = -m[0] / maxstd
C1[1][2] = -m[1] / maxstd
fp = np.dot(C1, fp)
| gpl-3.0 |
willettk/rgz-analysis | python/elliptical_histogram.py | 2 | 2268 | import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
plt.ion()
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
# Plot the normalized histogram of (W2-W3) colors for RGZ and WISE all-sky sources
filenames = ['%s/fits/%s.fits' % (rgz_dir,x) for x in ('wise_allsky_2M','gurkan_all','rgz_75_wise')]
labels = ('WISE all-sky sources','Gurkan+14 radio galaxies','RGZ 75% radio galaxies')
wise_snr = 5.
with fits.open(filenames[0]) as f:
d = f[1].data
maglim_w1 = (d['w1snr'] > wise_snr)
maglim_w2 = (d['w2snr'] > wise_snr)
maglim_w3 = (d['w3snr'] > wise_snr)
wise = d[maglim_w1 & maglim_w2 & maglim_w3]
with fits.open(filenames[2]) as f:
d = f[1].data
rgz75 = d['ratio'] >= 0.75
snr_w1 = (d['snr1'] >= wise_snr)
snr_w2 = (d['snr2'] >= wise_snr)
snr_w3 = (d['snr3'] >= wise_snr)
rgz = d[rgz75 & snr_w1 & snr_w2 & snr_w3]
# Cut both
wise_ell = wise[((wise['w2mpro'] - wise['w3mpro']) > -0.5) & ((wise['w2mpro'] - wise['w3mpro']) < 1.5)]
rgz_ell = rgz[((rgz['w2mpro'] - rgz['w3mpro']) > -0.5) & ((rgz['w2mpro'] - rgz['w3mpro']) < 1.5)]
fig = plt.figure(2,(8,8))
ax1 = fig.add_subplot(111)
from astroML.plotting import hist as histML
c1 = '#e41a1c'
c2 = '#377eb8'
c2 = '#a6cee3'
c3 = '#386cb0'
histML(wise_ell['w2mpro'] - wise_ell['w3mpro'], bins=25, ax=ax1, histtype='stepfilled', alpha=0.5, color=c2, weights=np.zeros(len(wise_ell)) + 1./len(wise_ell), range=(-0.5,1.5),label='WISE all-sky')
histML(rgz_ell['w2mpro'] - rgz_ell['w3mpro'], bins=25, ax=ax1, histtype='step', linewidth=3, color=c1, weights=np.zeros(len(rgz_ell)) + 1./len(rgz_ell), range=(-0.5,1.5),label='RGZ 75%')
ax1.set_ylim(0,0.30)
ax1.vlines(x=np.median(wise_ell['w2mpro'] - wise_ell['w3mpro']),ymin=ax1.get_ylim()[0],ymax = ax1.get_ylim()[1],color=c3,linestyle='--')
ax1.vlines(x=np.median(rgz_ell['w2mpro'] - rgz_ell['w3mpro']),ymin=ax1.get_ylim()[0],ymax = ax1.get_ylim()[1],color=c1,linestyle='-')
ax1.set_xlabel(r'$(W2-W3)$',fontsize=24)
ax1.set_ylabel('normalized count',fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
ax1.legend()
#plt.show()
fig.savefig('/Users/willettk/Astronomy/Research/GalaxyZoo/radiogalaxyzoo/paper/figures/elliptical_histogram.eps')
| mit |
jhamman/xray | doc/gallery/plot_rasterio.py | 2 | 1669 | # -*- coding: utf-8 -*-
"""
.. _recipes.rasterio:
=================================
Parsing rasterio's geocoordinates
=================================
Converting a projection's cartesian coordinates into 2D longitudes and
latitudes.
These new coordinates might be handy for plotting and indexing, but it should
be kept in mind that a grid which is regular in projection coordinates will
likely be irregular in lon/lat. It is often recommended to work in the data's
original map projection.
"""
import os
import urllib.request
import numpy as np
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from rasterio.warp import transform
# Download the file from rasterio's repository
url = 'https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif'
urllib.request.urlretrieve(url, 'RGB.byte.tif')
# Read the data
da = xr.open_rasterio('RGB.byte.tif')
# Compute the lon/lat coordinates with rasterio.warp.transform
ny, nx = len(da['y']), len(da['x'])
x, y = np.meshgrid(da['x'], da['y'])
# Rasterio works with 1D arrays
lon, lat = transform(da.crs, {'init': 'EPSG:4326'},
x.flatten(), y.flatten())
lon = np.asarray(lon).reshape((ny, nx))
lat = np.asarray(lat).reshape((ny, nx))
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
cmap='Greys_r', add_colorbar=False)
ax.coastlines('10m', color='r')
plt.show()
# Delete the file
os.remove('RGB.byte.tif')
| apache-2.0 |
tomlof/scikit-learn | examples/linear_model/plot_ard.py | 32 | 3912 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| bsd-3-clause |
tdhopper/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ephes/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
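# Example invocation (illustrative; the estimator names must be keys of the
# ESTIMATORS dict above):
#
#   python bench_20newsgroups.py -e logistic_regression naive_bayes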
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
ratnania/caid | examples/predefined_geometries_2d.py | 1 | 1344 | from matplotlib import pyplot as plt
import numpy as np
# ... creates a unit square of degree (2,2) and 4 elements in each direction
from caid.cad_geometry import square
geo = square(n=[3,3], p=[2,2])
geo.plotMesh(MeshResolution=10)
plt.savefig("predefined_geometries_2d_square.png")
# ...
plt.clf()
# ... creates a unit circle of degree (2,2) and (8,4) elements in each direction
from caid.cad_geometry import circle
geo = circle(n=[7,3], p=[2,2])
geo.plotMesh(MeshResolution=10)
plt.savefig("predefined_geometries_2d_circle.png")
# ...
plt.clf()
# ... creates a unit quart_circle of degree (2,2) and 4 elements in each direction
from caid.cad_geometry import quart_circle
geo = quart_circle(n=[3,3], p=[2,2])
geo.plotMesh(MeshResolution=10)
plt.savefig("predefined_geometries_2d_quart_circle.png")
# ...
plt.clf()
# ... creates a unit annulus of degree (2,2) and 4 elements in each direction
from caid.cad_geometry import annulus
geo = annulus(n=[7,7], p=[2,2])
geo.plotMesh(MeshResolution=10)
plt.savefig("predefined_geometries_2d_annulus.png")
# ...
plt.clf()
# ... creates a unit circle with 5 patchs, of degree (2,2) and (4,4) elements in each direction
from caid.cad_geometry import circle_5mp as circle
geo = circle(n=[7,7], p=[2,2])
geo.plotMesh(MeshResolution=10)
plt.savefig("predefined_geometries_2d_circle_5mp.png")
# ...
| mit |
PatrickChrist/scikit-learn | sklearn/utils/estimator_checks.py | 33 | 48331 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
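# Example usage of check_estimator (a sketch, not a doctest; any
# sklearn-compatible estimator class can be passed):
#
#   from sklearn.svm import LinearSVC
#   check_estimator(LinearSVC)  # raises on the first failed check,
#                               # returns None if every check passes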
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
    # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle negative feature values
X -= X.min()
    # some estimators only accept a multi-output (2-D) y
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is unlikely to
            # be enough for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
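    # Note (added comment): this manual weighting reproduces the 'balanced'
    # heuristic, n_samples / (n_classes * per-class count), so both fits should
    # learn the same coefficients, which is what the final assert checks.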
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
arjoly/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
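# Optional sanity check (not in the original example): after fitting,
# BayesianRidge exposes its estimated noise precision and weight precision.
print("Estimated alpha_ (noise precision): %.2f" % clf.alpha_)
print("Estimated lambda_ (weight precision): %.2f" % clf.lambda_)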
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/tests/test_bbox_tight.py | 4 | 3861 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from distutils.version import LooseVersion
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import numpy as np
from matplotlib import rcParams
from matplotlib.testing.decorators import image_comparison
from matplotlib.testing.noseclasses import KnownFailureTest
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'), tol=15)
def test_bbox_inches_tight():
#: Test that a figure saved using bbox_inches='tight' is clipped correctly
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.array([0.0] * len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1, 1)
for row in xrange(rows):
plt.bar(ind, data[row], width, bottom=yoff)
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc=(1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(list(xrange(10)), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, )
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(list(xrange(10)), list(xrange(10)))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
@image_comparison(baseline_images=['bbox_inches_tight_raster'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_raster():
"""Test rasterization with tight_layout"""
if LooseVersion(np.__version__) >= LooseVersion('1.11.0'):
raise KnownFailureTest("Fall out from a fixed numpy bug")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1.0, 2.0], rasterized=True)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
shoyer/xarray | xarray/tests/test_conventions.py | 1 | 13082 | import contextlib
import warnings
import numpy as np
import pandas as pd
import pytest
from xarray import (
Dataset,
SerializationWarning,
Variable,
coding,
conventions,
open_dataset,
)
from xarray.backends.common import WritableCFDataStore
from xarray.backends.memory import InMemoryDataStore
from xarray.conventions import decode_cf
from xarray.testing import assert_identical
from . import (
assert_array_equal,
raises_regex,
requires_cftime,
requires_dask,
requires_netCDF4,
)
from .test_backends import CFEncodedBase
class TestBoolTypeArray:
def test_booltype_array(self):
x = np.array([1, 0, 1, 1, 0], dtype="i1")
bx = conventions.BoolTypeArray(x)
assert bx.dtype == np.bool
assert_array_equal(
bx, np.array([True, False, True, True, False], dtype=np.bool)
)
class TestNativeEndiannessArray:
def test(self):
x = np.arange(5, dtype=">i8")
expected = np.arange(5, dtype="int64")
a = conventions.NativeEndiannessArray(x)
assert a.dtype == expected.dtype
assert a.dtype == expected[:].dtype
assert_array_equal(a, expected)
def test_decode_cf_with_conflicting_fill_missing_value():
expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"})
var = Variable(
["t"], np.arange(3), {"units": "foobar", "missing_value": 0, "_FillValue": 1}
)
with warnings.catch_warnings(record=True) as w:
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
assert "has multiple fill" in str(w[0].message)
expected = Variable(["t"], np.arange(10), {"units": "foobar"})
var = Variable(
["t"],
np.arange(10),
{"units": "foobar", "missing_value": np.nan, "_FillValue": np.nan},
)
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
var = Variable(
["t"],
np.arange(10),
{
"units": "foobar",
"missing_value": np.float32(np.nan),
"_FillValue": np.float32(np.nan),
},
)
actual = conventions.decode_cf_variable("t", var)
assert_identical(actual, expected)
@requires_cftime
class TestEncodeCFVariable:
def test_incompatible_attributes(self):
invalid_vars = [
Variable(
["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"}
),
Variable(["t"], pd.to_timedelta(["1 day"]), {"units": "foobar"}),
Variable(["t"], [0, 1, 2], {"add_offset": 0}, {"add_offset": 2}),
Variable(["t"], [0, 1, 2], {"_FillValue": 0}, {"_FillValue": 2}),
]
for var in invalid_vars:
with pytest.raises(ValueError):
conventions.encode_cf_variable(var)
def test_missing_fillvalue(self):
v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
v.encoding = {"dtype": "int16"}
with pytest.warns(Warning, match="floating point data as an integer"):
conventions.encode_cf_variable(v)
def test_multidimensional_coordinates(self):
# regression test for GH1763
# Set up test case with coordinates that have overlapping (but not
# identical) dimensions.
zeros1 = np.zeros((1, 5, 3))
zeros2 = np.zeros((1, 6, 3))
zeros3 = np.zeros((1, 5, 4))
orig = Dataset(
{
"lon1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lon2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lon3": (["x1", "y2"], zeros3.squeeze(0), {}),
"lat1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lat2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lat3": (["x1", "y2"], zeros3.squeeze(0), {}),
"foo1": (["time", "x1", "y1"], zeros1, {"coordinates": "lon1 lat1"}),
"foo2": (["time", "x2", "y1"], zeros2, {"coordinates": "lon2 lat2"}),
"foo3": (["time", "x1", "y2"], zeros3, {"coordinates": "lon3 lat3"}),
"time": ("time", [0.0], {"units": "hours since 2017-01-01"}),
}
)
orig = conventions.decode_cf(orig)
# Encode the coordinates, as they would be in a netCDF output file.
enc, attrs = conventions.encode_dataset_coordinates(orig)
# Make sure we have the right coordinates for each variable.
foo1_coords = enc["foo1"].attrs.get("coordinates", "")
foo2_coords = enc["foo2"].attrs.get("coordinates", "")
foo3_coords = enc["foo3"].attrs.get("coordinates", "")
assert set(foo1_coords.split()) == {"lat1", "lon1"}
assert set(foo2_coords.split()) == {"lat2", "lon2"}
assert set(foo3_coords.split()) == {"lat3", "lon3"}
# Should not have any global coordinates.
assert "coordinates" not in attrs
def test_do_not_overwrite_user_coordinates(self):
orig = Dataset(
coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])},
data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])},
)
orig["a"].encoding["coordinates"] = "y"
orig["b"].encoding["coordinates"] = "z"
enc, _ = conventions.encode_dataset_coordinates(orig)
assert enc["a"].attrs["coordinates"] == "y"
assert enc["b"].attrs["coordinates"] == "z"
orig["a"].attrs["coordinates"] = "foo"
with raises_regex(ValueError, "'coordinates' found in both attrs"):
conventions.encode_dataset_coordinates(orig)
@requires_dask
def test_string_object_warning(self):
original = Variable(("x",), np.array(["foo", "bar"], dtype=object)).chunk()
with pytest.warns(SerializationWarning, match="dask array with dtype=object"):
encoded = conventions.encode_cf_variable(original)
assert_identical(original, encoded)
@requires_cftime
class TestDecodeCF:
def test_dataset(self):
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
)
expected = Dataset(
{"foo": ("t", [0, 0, 0], {"units": "bar"})},
{
"t": pd.date_range("2000-01-01", periods=3),
"y": ("t", [5.0, 10.0, np.nan]),
},
)
actual = conventions.decode_cf(original)
assert_identical(expected, actual)
def test_invalid_coordinates(self):
# regression test for GH308
original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})})
actual = conventions.decode_cf(original)
assert_identical(original, actual)
def test_decode_coordinates(self):
# regression test for GH610
original = Dataset(
{"foo": ("t", [1, 2], {"coordinates": "x"}), "x": ("t", [4, 5])}
)
actual = conventions.decode_cf(original)
assert actual.foo.encoding["coordinates"] == "x"
def test_0d_int32_encoding(self):
original = Variable((), np.int32(0), encoding={"dtype": "int64"})
expected = Variable((), np.int64(0))
actual = conventions.maybe_encode_nonstring_dtype(original)
assert_identical(expected, actual)
def test_decode_cf_with_multiple_missing_values(self):
original = Variable(["t"], [0, 1, 2], {"missing_value": np.array([0, 1])})
expected = Variable(["t"], [np.nan, np.nan, 2], {})
with warnings.catch_warnings(record=True) as w:
actual = conventions.decode_cf_variable("t", original)
assert_identical(expected, actual)
assert "has multiple fill" in str(w[0].message)
def test_decode_cf_with_drop_variables(self):
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"x": ("x", [9, 8, 7], {"units": "km"}),
"foo": (
("t", "x"),
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
{"units": "bar"},
),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
)
expected = Dataset(
{
"t": pd.date_range("2000-01-01", periods=3),
"foo": (
("t", "x"),
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
{"units": "bar"},
),
"y": ("t", [5, 10, np.nan]),
}
)
actual = conventions.decode_cf(original, drop_variables=("x",))
actual2 = conventions.decode_cf(original, drop_variables="x")
assert_identical(expected, actual)
assert_identical(expected, actual2)
def test_invalid_time_units_raises_eagerly(self):
ds = Dataset({"time": ("time", [0, 1], {"units": "foobar since 123"})})
with raises_regex(ValueError, "unable to decode time"):
decode_cf(ds)
@requires_cftime
def test_dataset_repr_with_netcdf4_datetimes(self):
# regression test for #347
attrs = {"units": "days since 0001-01-01", "calendar": "noleap"}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "unable to decode time")
ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)}))
assert "(time) object" in repr(ds)
attrs = {"units": "days since 1900-01-01"}
ds = decode_cf(Dataset({"time": ("time", [0, 1], attrs)}))
assert "(time) datetime64[ns]" in repr(ds)
@requires_cftime
def test_decode_cf_datetime_transition_to_invalid(self):
# manually create dataset with not-decoded date
from datetime import datetime
ds = Dataset(coords={"time": [0, 266 * 365]})
units = "days since 2000-01-01 00:00:00"
ds.time.attrs = dict(units=units)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "unable to decode time")
ds_decoded = conventions.decode_cf(ds)
expected = [datetime(2000, 1, 1, 0, 0), datetime(2265, 10, 28, 0, 0)]
assert_array_equal(ds_decoded.time.values, expected)
@requires_dask
def test_decode_cf_with_dask(self):
import dask.array as da
original = Dataset(
{
"t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
"foo": ("t", [0, 0, 0], {"coordinates": "y", "units": "bar"}),
"bar": ("string2", [b"a", b"b"]),
"baz": (("x"), [b"abc"], {"_Encoding": "utf-8"}),
"y": ("t", [5, 10, -999], {"_FillValue": -999}),
}
).chunk()
decoded = conventions.decode_cf(original)
print(decoded)
assert all(
isinstance(var.data, da.Array)
for name, var in decoded.variables.items()
if name not in decoded.indexes
)
assert_identical(decoded, conventions.decode_cf(original).compute())
@requires_dask
def test_decode_dask_times(self):
original = Dataset.from_dict(
{
"coords": {},
"dims": {"time": 5},
"data_vars": {
"average_T1": {
"dims": ("time",),
"attrs": {"units": "days since 1958-01-01 00:00:00"},
"data": [87659.0, 88024.0, 88389.0, 88754.0, 89119.0],
}
},
}
)
assert_identical(
conventions.decode_cf(original.chunk()),
conventions.decode_cf(original).chunk(),
)
class CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore):
def encode_variable(self, var):
"""encode one variable"""
coder = coding.strings.EncodedStringCoder(allows_unicode=True)
var = coder.encode(var)
return var
@requires_netCDF4
class TestCFEncodedDataStore(CFEncodedBase):
@contextlib.contextmanager
def create_store(self):
yield CFEncodedInMemoryStore()
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs={}, open_kwargs={}, allow_cleanup_failure=False
):
store = CFEncodedInMemoryStore()
data.dump_to_store(store, **save_kwargs)
yield open_dataset(store, **open_kwargs)
@pytest.mark.skip("cannot roundtrip coordinates yet for " "CFEncodedInMemoryStore")
def test_roundtrip_coordinates(self):
pass
def test_invalid_dataarray_names_raise(self):
# only relevant for on-disk file formats
pass
def test_encoding_kwarg(self):
# we haven't bothered to raise errors yet for unexpected encodings in
# this test dummy
pass
def test_encoding_kwarg_fixed_width_string(self):
# CFEncodedInMemoryStore doesn't support explicit string encodings.
pass
| apache-2.0 |
tupes/School | CS366/Asns/Asn1/ass1.py | 1 | 1105 | import sys
import matplotlib.pyplot as plt
step = float(sys.argv[1])
try: changes = int(sys.argv[2])
except (IndexError, ValueError): changes = 0
invert = 'i' in sys.argv
def newEstimate(old, target, step):
	print abs(target - old)
	return old + step * (target - old)
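# Note (added comment): newEstimate implements the standard incremental update
#   NewEstimate = OldEstimate + StepSize * (Target - OldEstimate).
# A constant step size keeps tracking a moving target, while the 'i' option
# below switches to step = 1/n, which gives (approximately) a sample average.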
target = 1
iters = 6
est = 0
xs = []
ys = []
tys = []
for x in range(1, iters + 1):
xs.append(x)
tys.append(1)
ys.append(est)
est = newEstimate(est, target, step)
if invert: step = 1 / float(x)
if changes > 0:
# change target to 0
target = 0
for x in range(iters + 1, iters * 2 + 1):
xs.append(x)
tys.append(0)
ys.append(est)
est = newEstimate(est, target, step)
if invert: step = 1 / float(x)
if changes > 1:
# change target on every trial
for x in range(iters * 2 + 1, iters * 3 + 1):
if target == 0: target = 1
else: target = 0
xs.append(x)
tys.append(target)
ys.append(est)
est = newEstimate(est, target, step)
if invert: step = 1 / float(x)
f = open('step' + str(step) + 'results.dat', 'w')
for y in ys:
f.write(str(y) + '\n')
f.close()
plt.plot(xs, ys)
plt.plot(xs, tys)
plt.show()
| gpl-3.0 |
luturonunca/LAGOmaps | mapingLAGO/sui.py | 1 | 2076 | import shapefile
import numpy as np
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Coordinates (from Wikipedia) and shift
DF = [-99.1333, 19.4333]
TH = [ 8.1042, 47.4375]
proj = 'tmerc'
# Colours
cMX = '#0000cc' # Blue tone
cCH = '#006600' # Green tone
# 1500 km by 1500 km, Transverse Mercator; resolution does not
# matter much, as we use shapefile data for the borders
width = 1500000
height = 1500000
dshift = [-.133, -.05]
fig2 = plt.figure(figsize=(8,8))
# Create basemaps for Mexico and Switzerland
m_ch = Basemap(width=width, height=height, projection=proj,
lon_0=TH[0], lat_0=TH[1],resolution='l')
m_ch.drawcoastlines()
#m_ch.drawcountries()
# Draw the outline of Switzerland (adm0 shapefile)
CHE_adm1 = shapefile.Reader('data/basemap/CHE_adm0')
iag = [i for i, s in enumerate(CHE_adm1.records()) if 'SWITZERLAND' in s][0]
aglonlat = np.array(CHE_adm1.shapes()[iag].points)
print aglonlat
print aglonlat[:,1]
m_ch.plot(aglonlat[:, 0], aglonlat[:, 1], '-', c=cCH, lw=6)
agx, agy = m_ch(aglonlat[:, 0], aglonlat[:, 1])
#plt.fill(agx, agy, cCH, ec='none', alpha=.4)
CHE_adm0 = shapefile.Reader('data/basemap/CHE_adm0')
chlonlat = np.array(CHE_adm0.shape().points)
m_ch.plot(chlonlat[:, 0], chlonlat[:, 1] , '-', c=cCH, latlon=True, ms=1)
# Draw scales to cross-check that the scales are the same
#sdf = np.array(m_mx(DF[0], DF[1])) - np.array([28000, 10000])
#sth = np.array(m_ch(TH[0], TH[1])) - np.array([28000, 10000])
#isdf = m_mx(sdf[0], sdf[1], inverse='True')
#isth = m_ch(sth[0], sth[1], inverse='True')
#m_mx.drawmapscale(isdf[0], isdf[1], DF[0], DF[1]+dshift[0],
# 4, barstyle='fancy', fillcolor2=cMX, fontsize=12)
#m_ch.drawmapscale(isth[0], isth[1], TH[0], TH[1]+dshift[1],
# 4, barstyle='fancy', fillcolor2=cCH, fontsize=12)
# Draw locations
#hPT = [-99.12, 19.47]
#pPT = [-99.11, 19.33]
#m_mx.plot(hPT[0], hPT[1], '*', c='k', ms=14, mew=1)
#m_mx.plot(pPT[0], pPT[1], 'o', c='k', ms=10, mew=1)
# Remove border around the whole
#m_mx.drawmapboundary(color='none')
plt.show()
| cc0-1.0 |
av8ramit/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
yunfeilu/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
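# Note (added comment): _weights spreads every sample position over its two
# neighbouring integer bins with linear-interpolation weights (1 - alpha) and
# alpha; these index/weight pairs become the entries of the sparse operator.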
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
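# Note (added comment): per the docstring, the operator has shape
# (n_dir * l_x, l_x ** 2); each row is one detector bin at one angle, and
# applying it to a flattened image vector yields the simulated projections.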
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
great-expectations/great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_alphabetical.py | 1 | 17246 | import json
import operator
from typing import Any, Dict, Optional, Tuple
import pandas
from great_expectations.core import ExpectationConfiguration
#!!! This giant block of imports should be something simpler, such as:
# from great_expectations.helpers.expectation_creation import *
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricPartialFunctionTypes,
)
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
Expectation,
ExpectationConfiguration,
)
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.metrics.import_manager import F, Window, sparktypes
from great_expectations.expectations.metrics.map_metric import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.metrics.metric_provider import (
metric_partial,
metric_value,
)
from great_expectations.expectations.metrics.table_metrics.table_column_types import (
ColumnTypes,
)
from great_expectations.expectations.registry import (
_registered_expectations,
_registered_metrics,
_registered_renderers,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import num_to_str, substitute_none_for_missing
from great_expectations.validator.validation_graph import MetricConfiguration
from great_expectations.validator.validator import Validator
# This class defines a Metric to support your Expectation
# For most Expectations, the main business logic for calculation will live here.
# To learn about the relationship between Metrics and Expectations, please visit {some doc}.
class ColumnValuesAreAlphabetical(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.are_alphabetical"
condition_value_keys = ("reverse",)
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, reverse=False, **kwargs):
# lowercase the whole column to avoid issues with capitalization
# (since every capital letter is "before" the lowercase letters)
column_lower = column.map(str.lower)
column_length = column.size
        # choose the operator to use for comparison of consecutive items
# could be easily adapted for other comparisons, perhaps of custom objects
if reverse:
compare_function = operator.ge
else:
compare_function = operator.le
output = [True] # first value is automatically in order
for i in range(1, column_length):
if (
column_lower[i] and column_lower[i - 1]
): # make sure we aren't comparing Nones
output.append(compare_function(column_lower[i - 1], column_lower[i]))
else:
output.append(None)
return pandas.Series(output)
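    # Worked example (illustrative): for the column ["apple", "Banana", "carrot"]
    # with reverse=False, the lowercased comparisons are "apple" <= "banana" and
    # "banana" <= "carrot", so _pandas returns pandas.Series([True, True, True]).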
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
#
#
#
#
#
#
#
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
######
# The Spark implementation is based on the expect_column_values_to_be_decreasing Expectation but currently doesn't
# work.
######
# @metric_partial(
# engine=SparkDFExecutionEngine,
# partial_fn_type=MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
# domain_type=MetricDomainTypes.COLUMN,
# )
# def _spark(
# cls,
# execution_engine: SparkDFExecutionEngine,
# metric_domain_kwargs: Dict,
# metric_value_kwargs: Dict,
# metrics: Dict[Tuple, Any],
# runtime_configuration: Dict,
# ):
# # check if column is any type that could have na (numeric types)
# column_name = metric_domain_kwargs["column"]
# # table_columns = metrics["table.column_types"]
# # column_metadata = [col for col in table_columns if col["name"] == column_name][
# # 0
# # ]
# # if isinstance(
# # column_metadata["type"],
# # (
# # sparktypes.LongType,
# # sparktypes.DoubleType,
# # sparktypes.IntegerType,
# # ),
# # ):
# # # if column is any type that could have NA values, remove them (not filtered by .isNotNull())
# # compute_domain_kwargs = execution_engine.add_column_row_condition(
# # metric_domain_kwargs,
# # filter_null=cls.filter_column_isnull,
# # filter_nan=True,
# # )
# # else:
# compute_domain_kwargs = metric_domain_kwargs
# (
# df,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# ) = execution_engine.get_compute_domain(
# compute_domain_kwargs, MetricDomainTypes.COLUMN
# )
#
# # # NOTE: 20201105 - parse_strings_as_datetimes is not supported here;
# # # instead detect types naturally
# column = F.col(column_name)
# column = F.lower(column)
# # if isinstance(
# # column_metadata["type"], (sparktypes.TimestampType, sparktypes.DateType)
# # ):
# # diff = F.datediff(
# # column, F.lag(column).over(Window.orderBy(F.lit("constant")))
# # )
# # else:
# diff = F.lag(column, default="a").over(Window.orderBy(F.lit("constant")))
# diff = F.when(column > diff, True).otherwise(False)
#
# # NOTE: because in spark we are implementing the window function directly,
# # we have to return the *unexpected* condition
# # if metric_value_kwargs["strictly"]:
# # return (
# # F.when(diff >= 0, F.lit(True)).otherwise(F.lit(False)),
# # compute_domain_kwargs,
# # accessor_domain_kwargs,
# # )
# # # If we expect values to be flat or decreasing then unexpected values are those
# # # that are decreasing
# # else:
# return (
# diff,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# )
# This class defines the Expectation itself
# The main business logic for calculation lives here.
class ExpectColumnValuesToBeAlphabetical(ColumnMapExpectation):
"""
Given a list of string values, check if the list is alphabetical, either forwards or backwards (specified with the
`reverse` parameter). Comparison is case-insensitive. Using `mostly` will give you how many items are alphabetical
relative to the immediately previous item in the list.
conditions:
reverse: Checks for Z to A alphabetical if True, otherwise checks A to Z
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"is_alphabetical_lowercase": [
"apple",
"banana",
"coconut",
"donut",
"eggplant",
"flour",
"grapes",
"jellybean",
None,
None,
None,
],
"is_alphabetical_lowercase_reversed": [
"moon",
"monster",
"messy",
"mellow",
"marble",
"maple",
"malted",
"machine",
None,
None,
None,
],
"is_alphabetical_mixedcase": [
"Atlanta",
"bonnet",
"Delaware",
"gymnasium",
"igloo",
"Montreal",
"Tennessee",
"toast",
"Washington",
"xylophone",
"zebra",
],
"out_of_order": [
"Right",
"wrong",
"up",
"down",
"Opposite",
"Same",
"west",
"east",
None,
None,
None,
],
},
"tests": [
{
"title": "positive_test_with_all_values_alphabetical_lowercase",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "is_alphabetical_lowercase",
"reverse": False,
"mostly": 1.0,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_all_values_alphabetical_lowercase_reversed",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "is_alphabetical_lowercase_reversed",
"reverse": False,
"mostly": 1.0,
},
"out": {
"success": False,
"unexpected_index_list": [1, 2, 3, 4, 5, 6, 7],
"unexpected_list": [
"monster",
"messy",
"mellow",
"marble",
"maple",
"malted",
"machine",
],
},
},
{
"title": "positive_test_with_all_values_alphabetical_lowercase_reversed",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "is_alphabetical_lowercase_reversed",
"reverse": True,
"mostly": 1.0,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "positive_test_with_all_values_alphabetical_mixedcase",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "is_alphabetical_mixedcase", "mostly": 1.0},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_out_of_order_mixedcase",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "out_of_order", "mostly": 1.0},
"out": {
"success": False,
"unexpected_index_list": [2, 3, 7],
"unexpected_list": ["up", "down", "east"],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["experimental"], # Tags for this Expectation in the gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@sethdmay",
"@maximetokman",
"@Harriee02", # Don't forget to add your github handle here!
],
"package": "experimental_expectations",
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.are_alphabetical"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly", "reverse")
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, language=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, language=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_evaluation_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# language=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = runtime_configuration.get("include_column_name", True)
# include_column_name = (
# include_column_name if include_column_name is not None else True
# )
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
diagnostics_report = ExpectColumnValuesToBeAlphabetical().run_diagnostics()
print(json.dumps(diagnostics_report, indent=2))
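    # A minimal sketch (not part of the diagnostics) reproducing the pairwise,
    # case-insensitive comparison used by the metric above on a toy Series.
    _demo = pandas.Series(["apple", "Banana", "coconut"]).map(str.lower)
    print([operator.le(_demo[i - 1], _demo[i]) for i in range(1, _demo.size)])
    # expected output: [True, True]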
| apache-2.0 |
aabadie/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
GregorCH/ipet | ipet/misc/quick_Pandas.py | 1 | 3288 | """
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: Gregor Hendel
"""
import pandas as pd
import numpy as np
import itertools
from scipy import stats
default_to_latex_kw = dict(float_format=lambda x:"%.1f" % x, index_names=False)
colformatlatex = lambda x: "\\%s" % x
def quickAggregateAndPercentage(df, collist, rowlist, aggfunc=np.mean, defindex=0):
parts = []
if type(aggfunc) is not list:
aggfuncs = [aggfunc] * len(collist)
else:
aggfuncs = aggfunc
for col, aggfunc in zip(collist, aggfuncs):
        resultser = df.pivot_table(col, index=rowlist, aggfunc=aggfunc)
percentage = resultser / resultser[defindex] * 100
percentage.name = '%'
parts.append(resultser)
parts.append(percentage)
return pd.concat(parts, axis=1)
def quickToLatex(df, index_makros=False, **to_latex_kw):
newdf = df.rename(columns={col:colformatlatex(col) for col in df.columns if col != '%'})
if index_makros:
newdf.rename(index={idx:colformatlatex(idx) for idx in newdf.index}, inplace=True)
    to_latex_kws = {key: val for key, val in itertools.chain(list(default_to_latex_kw.items()), list(to_latex_kw.items()))}
    return newdf.to_latex(**to_latex_kws)
def quickAggregationOnIndex(df, col, aggfunc=np.min, threshold=None):
aggforindex = pd.Series(df.groupby(level=0).apply(lambda x:aggfunc(x[col])))
newcolname = aggfunc.__name__ + col
aggforindex.name = newcolname
if threshold is None:
return aggforindex
else:
newcolname += 'LE' + repr(threshold)
return pd.Series(aggforindex <= threshold, name=newcolname)
def getWilcoxonQuotientSignificance(x, y, shiftby=10):
shiftedquotients = (x + shiftby) / (y + shiftby)
logshifted = np.log2(shiftedquotients)
# filter elements that are too close to zero
logshifted = logshifted[np.abs(logshifted) >= np.log2(1 + 1e-2)]
if logshifted.size < 10:
return np.nan
try:
return stats.wilcoxon(logshifted.values)[1]
except ValueError:
return np.nan
def quickAggregateAndSignificance(df, collist, rowlist, aggfunc=np.mean, wilcoxonfuncs=None, defindex=0):
parts = []
if type(aggfunc) is not list:
aggfuncs = [aggfunc] * len(collist)
else:
aggfuncs = aggfunc
pieces = dict(list(df.groupby(rowlist)[collist]))
if wilcoxonfuncs is None:
wilcoxonfuncs = [getWilcoxonQuotientSignificance] * len(collist)
elif type(wilcoxonfuncs) is not list:
wilcoxonfuncs = [wilcoxonfuncs] * len(collist)
for col, aggfunc, wilcoxonfunc in zip(collist, aggfuncs, wilcoxonfuncs):
        resultser = df.pivot_table(col, index=rowlist, aggfunc=aggfunc)
percentage = resultser / resultser[defindex] * 100
significance = pd.Series([wilcoxonfunc(pieces[resultser.index[defindex]][col], pieces[idx][col]) for idx in resultser.index], index=resultser.index)
percentage.name = '%'
significance.name = 'Wilcox'
parts.append(resultser)
parts.append(percentage)
parts.append(significance)
return pd.concat(parts, axis=1)
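if __name__ == "__main__":
    # Minimal usage sketch (not part of the IPET API): compare two synthetic
    # runtime columns with the shifted-quotient Wilcoxon test defined above.
    rng = np.random.RandomState(0)
    x = pd.Series(rng.uniform(1.0, 10.0, size=50))
    y = x * rng.uniform(0.8, 1.2, size=50)
    print("Wilcoxon p-value:", getWilcoxonQuotientSignificance(x, y))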
| mit |
nmayorov/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/decomposition/dict_learning.py | 3 | 48029 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
dictionary : array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov : array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init : array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov : boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input : boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code : array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if dictionary.shape[1] != X.shape[1]:
raise ValueError("Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(
dictionary.shape, X.shape))
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
if init is not None:
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
if new_code.ndim != 2:
return new_code.reshape(n_samples, n_components)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix
dictionary : array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov : array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs : int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
copy_cov : boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
init : array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
n_jobs : int, optional
Number of parallel jobs to run.
check_input : boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
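    Examples
    --------
    A minimal illustrative call (tiny unnormalized dictionary, checking only
    the shape of the returned code):
    >>> import numpy as np
    >>> from sklearn.decomposition import sparse_encode
    >>> X = np.array([[1., 0., 0.], [0., 1., 1.]])
    >>> dictionary = np.array([[1., 0., 0.], [0., 1., 1.], [1., 1., 1.]])
    >>> sparse_encode(X, dictionary, algorithm='omp', n_nonzero_coefs=1).shape
    (2, 3)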
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y : array of shape (n_features, n_samples)
Data matrix.
code : array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
    verbose : bool
        Degree of output the procedure will print.
return_r2 : bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
dictionary : array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init : array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback : callable or None, optional (default: None)
Callable that gets invoked every five iterations
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
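    Examples
    --------
    A minimal illustrative call on small random data (only shapes are shown):
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(10, 8)
    >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
    ...                                          random_state=0)
    >>> code.shape, dictionary.shape
    ((10, 5), (5, 8))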
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback : callable or None, optional (default: None)
callable that gets invoked every five iterations
batch_size : int,
The number of samples to take in each batch.
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
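    Examples
    --------
    A minimal illustrative call on small random data (only shapes are shown):
    >>> import numpy as np
    >>> from sklearn.decomposition import dict_learning_online
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 8)
    >>> code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
    ...                                         random_state=0)
    >>> code.shape, dictionary.shape
    ((20, 5), (5, 8))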
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
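    Examples
    --------
    A minimal illustrative call with an identity dictionary and the simple
    'threshold' transform (shape check only):
    >>> import numpy as np
    >>> from sklearn.decomposition import SparseCoder
    >>> coder = SparseCoder(np.eye(3), transform_algorithm='threshold',
    ...                     transform_alpha=0.1)
    >>> coder.transform(np.array([[0., 0.5, 1.]])).shape
    (1, 3)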
"""
_required_parameters = ["dictionary"]
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : Ignored.
y : Ignored.
Returns
-------
self : object
Returns the object itself
"""
return self
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
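    Examples
    --------
    A minimal usage sketch (illustrative only; the data are random and the
    settings are chosen just to keep the run small):

    >>> import numpy as np
    >>> from sklearn.decomposition import DictionaryLearning
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(10, 8)
    >>> dico = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
    ...                           random_state=0)
    >>> dico.fit(X).components_.shape
    (5, 8)
    >>> dico.transform(X).shape
    (10, 5)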
"""

    def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state

    def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored.
Returns
-------
self : object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self


class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
number of parallel jobs to run
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
        the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
verbose : bool, optional (default: False)
To control the verbosity of the procedure.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
        history of the evolution, but they shouldn't have any use for the
        end user.
        A (n_components, n_components) is the dictionary covariance matrix.
        B (n_features, n_components) is the data approximation matrix.
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
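    Examples
    --------
    A minimal usage sketch (illustrative only; the data are random and the
    settings are chosen just to keep the run small):

    >>> import numpy as np
    >>> from sklearn.decomposition import MiniBatchDictionaryLearning
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 8)
    >>> dico = MiniBatchDictionaryLearning(n_components=5, n_iter=10,
    ...                                    batch_size=5, random_state=0)
    >>> dico.fit(X).components_.shape
    (5, 8)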
"""

    def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state

    def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self

    def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored.
iter_offset : integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
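        Examples
        --------
        A minimal sketch of streaming updates (illustrative only; the
        mini-batches below are random):

        >>> import numpy as np
        >>> from sklearn.decomposition import MiniBatchDictionaryLearning
        >>> rng = np.random.RandomState(0)
        >>> dico = MiniBatchDictionaryLearning(n_components=5, n_iter=5,
        ...                                    random_state=0)
        >>> for _ in range(3):
        ...     dico = dico.partial_fit(rng.randn(10, 8))
        >>> dico.components_.shape
        (5, 8)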
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self