repo_name | path | copies | size | content | license
---|---|---|---|---|---
huzq/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 2 | 34380 |
import sys
from io import StringIO
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sp
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from sklearn.exceptions import EfficiencyWarning
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold._t_sne import _joint_probabilities
from sklearn.manifold._t_sne import _joint_probabilities_nn
from sklearn.manifold._t_sne import _kl_divergence
from sklearn.manifold._t_sne import _kl_divergence_bh
from sklearn.manifold._t_sne import _gradient_descent
from sklearn.manifold._t_sne import trustworthiness
from sklearn.manifold import TSNE
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import _barnes_hut_tsne # type: ignore
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import cosine_distances
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack([
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
])
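# X_2d_grid is a 10 x 10 regular grid of 100 points covering [0, 1]^2; it is
# used by test_uniform_grid below to check that t-SNE approximately recovers a
# uniform 2D layout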
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _, compute_error=True):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_, compute_error=True):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 1.0
assert it == 0
assert("gradient norm" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 11
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 10
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
data = random_state.randn(50, 5)
distances = pairwise_distances(data).astype(np.float32)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
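# Note on the check above: with natural logarithms, the perplexity of row i is
# exp(H(P_i)), where H(P_i) = -sum_j P_ij * log(P_ij) is the Shannon entropy of
# the conditional distribution P_i; mean_perplexity averages exactly this
# quantity, so it should match desired_perplexity after the binary search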
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 200
desired_perplexity = 25.0
random_state = check_random_state(0)
data = random_state.randn(n_samples, 2).astype(np.float32, copy=False)
distances = pairwise_distances(data)
P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
# Test that when we use all the neighbors the results are identical
n_neighbors = n_samples - 1
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
mode='distance')
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, n_neighbors)
P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
indptr = distance_graph.indptr
P1_nn = np.array([P1[k, distance_graph.indices[indptr[k]:indptr[k + 1]]]
for k in range(n_samples)])
assert_array_almost_equal(P1_nn, P2, decimal=4)
# Test that the highest P_ij are the same when fewer neighbors are used
for k in np.linspace(150, n_samples - 1, 5):
k = int(k)
topn = k * 10 # check the top 10 * k entries out of k * k entries
distance_graph = nn.kneighbors_graph(n_neighbors=k, mode='distance')
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, k)
P2k = _binary_search_perplexity(distances_nn, desired_perplexity,
verbose=0)
assert_array_almost_equal(P1_nn, P2, decimal=2)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
idx = np.argsort(P2k.ravel())[::-1]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
n_neighbors = 10
n_samples = 100
random_state = check_random_state(0)
data = random_state.randn(n_samples, 5)
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
mode='distance')
distances = distance_graph.data.astype(np.float32, copy=False)
distances = distances.reshape(n_samples, n_neighbors)
last_P = None
desired_perplexity = 3
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), desired_perplexity,
verbose=0)
P1 = _joint_probabilities_nn(distance_graph, desired_perplexity,
verbose=0)
# Convert the sparse matrix to a dense one for testing
P1 = P1.toarray()
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
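# scipy.optimize.check_grad compares the analytical gradient returned by
# grad() against a finite-difference approximation of fun(); a value near
# zero below means the _kl_divergence gradient is consistent with its
# objective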
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert trustworthiness(X, 5.0 + X / 10.0) == 1.0
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert trustworthiness(X, X_embedded) < 0.6
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
@pytest.mark.parametrize("method", ['exact', 'barnes_hut'])
@pytest.mark.parametrize("init", ('random', 'pca'))
def test_preserve_trustworthiness_approximately(method, init):
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
n_components = 2
X = random_state.randn(50, n_components).astype(np.float32)
tsne = TSNE(n_components=n_components, init=init, random_state=0,
method=method, n_iter=700)
X_embedded = tsne.fit_transform(X)
t = trustworthiness(X, X_embedded, n_neighbors=1)
assert t > 0.85
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [250, 300, 350]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert kl_divergences[1] <= kl_divergences[0]
assert kl_divergences[2] <= kl_divergences[1]
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_fit_csr_matrix(method):
# X can be a sparse matrix.
rng = check_random_state(0)
X = rng.randn(50, 2)
X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method=method, n_iter=750)
X_embedded = tsne.fit_transform(X_csr)
assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1),
1.0, rtol=1.1e-1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
for i in range(3):
X = random_state.randn(80, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
early_exaggeration=2.0, metric="precomputed",
random_state=i, verbose=0, n_iter=500)
X_embedded = tsne.fit_transform(D)
t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed")
assert t > .95
def test_trustworthiness_not_euclidean_metric():
# Test trustworthiness with a metric different from 'euclidean' and
# 'precomputed'
random_state = check_random_state(0)
X = random_state.randn(100, 2)
assert (trustworthiness(X, X, metric='cosine') ==
trustworthiness(pairwise_distances(X, metric='cosine'), X,
metric='precomputed'))
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
with pytest.raises(ValueError, match="early_exaggeration .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
with pytest.raises(ValueError, match="n_iter .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
@pytest.mark.parametrize('method, retype', [
('exact', np.asarray),
('barnes_hut', np.asarray),
('barnes_hut', sp.csr_matrix),
])
@pytest.mark.parametrize('D, message_regex', [
([[0.0], [1.0]], ".* square distance matrix"),
([[0., -1.], [1., 0.]], ".* positive.*"),
])
def test_bad_precomputed_distances(method, D, retype, message_regex):
tsne = TSNE(metric="precomputed", method=method)
with pytest.raises(ValueError, match=message_regex):
tsne.fit_transform(retype(D))
def test_exact_no_precomputed_sparse():
tsne = TSNE(metric='precomputed', method='exact')
with pytest.raises(TypeError, match='sparse'):
tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))
def test_high_perplexity_precomputed_sparse_distances():
# Perplexity should be less than 50
dist = np.array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.]])
bad_dist = sp.csr_matrix(dist)
tsne = TSNE(metric="precomputed")
msg = "3 neighbors per samples are required, but some samples have only 1"
with pytest.raises(ValueError, match=msg):
tsne.fit_transform(bad_dist)
@ignore_warnings(category=EfficiencyWarning)
def test_sparse_precomputed_distance():
"""Make sure that TSNE works identically for sparse and dense matrix"""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D_sparse = kneighbors_graph(X, n_neighbors=100, mode='distance',
include_self=True)
D = pairwise_distances(X)
assert sp.issparse(D_sparse)
assert_almost_equal(D_sparse.A, D)
tsne = TSNE(metric="precomputed", random_state=0)
Xt_dense = tsne.fit_transform(D)
for fmt in ['csr', 'lil']:
Xt_sparse = tsne.fit_transform(D_sparse.asformat(fmt))
assert_almost_equal(Xt_dense, Xt_sparse)
def test_non_positive_computed_distances():
# Computed distance matrices must be positive.
def metric(x, y):
return -1
tsne = TSNE(metric=metric, method='exact')
X = np.array([[0.0, 0.0], [1.0, 1.0]])
with pytest.raises(ValueError, match="All distances .*metric given.*"):
tsne.fit_transform(X)
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
tsne = TSNE(init="not available")
m = "'init' must be 'pca', 'random', or a numpy array"
with pytest.raises(ValueError, match=m):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available", method='exact')
with pytest.raises(ValueError, match="Unknown metric not available.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
tsne = TSNE(metric="not available", method='barnes_hut')
with pytest.raises(ValueError, match="Metric 'not available' not valid.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_method_not_available():
# 'method' must be 'barnes_hut' or 'exact'
tsne = TSNE(method='not available')
with pytest.raises(ValueError, match="'method' must be 'barnes_hut' or "):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_angle_out_of_range_checks():
# check the angle parameter range
for angle in [-1, -1e-6, 1 + 1e-6, 2]:
tsne = TSNE(angle=angle)
with pytest.raises(ValueError, match="'angle' must be between "
"0.0 - 1.0"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
with pytest.raises(ValueError, match="The parameter init=\"pca\" cannot"
" be used with"
" metric=\"precomputed\"."):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_n_components_range():
# barnes_hut method should only be used with n_components <= 3
tsne = TSNE(n_components=4, method="barnes_hut")
with pytest.raises(ValueError, match="'n_components' should be .*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
# check that the ``early_exaggeration`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=1.0, n_iter=250)
X_embedded1 = tsne.fit_transform(X)
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=10.0, n_iter=250)
X_embedded2 = tsne.fit_transform(X)
assert not np.allclose(X_embedded1, X_embedded2)
def test_n_iter_used():
# check that the ``n_iter`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
for n_iter in [251, 500]:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=0.5, init="random", random_state=0,
method=method, early_exaggeration=1.0, n_iter=n_iter)
tsne.fit_transform(X)
assert tsne.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes-Hut gradient
# is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64, copy=False)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
grad_bh, 0.5, 2, 1,
skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("nearest neighbors..." in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("early exaggeration" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
@pytest.mark.parametrize('dt', [np.float32, np.float64])
def test_64bit(method, dt):
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
X = random_state.randn(10, 2).astype(dt, copy=False)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method, verbose=0,
n_iter=300)
X_embedded = tsne.fit_transform(X)
effective_type = X_embedded.dtype
# the t-SNE Cython code is single precision only, so the output will
# always be single precision, irrespective of the input dtype
assert effective_type == np.float32
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_kl_divergence_not_nan(method):
# Ensure kl_divergence_ is computed at the last iteration
# even though n_iter % n_iter_check != 0, i.e. 503 % 50 != 0
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method, verbose=0, n_iter=503)
tsne.fit_transform(X)
assert not np.isnan(tsne.kl_divergence_)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features)
distances = pairwise_distances(data)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, verbose=0)
kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
n_neighbors=n_neighbors, mode='distance')
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
n_samples, n_components,
angle=angle, skip_num_points=0,
verbose=0)
P = squareform(P)
P_bh = P_bh.toarray()
assert_array_almost_equal(P_bh, P, decimal=5)
assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 10)
for method in ["barnes_hut", "exact"]:
tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
random_state=0, method=method, n_iter=351, init="random")
tsne._N_ITER_CHECK = 1
tsne._EXPLORATION_N_ITER = 0
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert ("did not make any progress during the "
"last -1 episodes. Finished." in out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
# Once the computation is finished, the last gradient norm value is just
# repeated and does not need to be stored
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '').split(' ')[0]
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because the moment it becomes smaller, the optimization stops
assert n_smaller_gradient_norms <= 1
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact',
n_iter=500)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` because of numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because a bad
initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
n_iter = 500
for seed in seeds:
tsne = TSNE(n_components=2, init='random', random_state=seed,
perplexity=50, n_iter=n_iter, method=method)
Y = tsne.fit_transform(X_2d_grid)
try_name = "{}_{}".format(method, seed)
try:
assert_uniform_grid(Y, try_name)
except AssertionError:
# If the test fails a first time, re-run with init=Y to see if
# this was caused by a bad initialization. Note that this will
# also run an early_exaggeration step.
try_name += ":rerun"
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
# Ensure that the resulting embedding leads to approximately
# uniformly spaced points: the distance to the closest neighbors
# should be non-zero and approximately constant.
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > .5, try_name
assert largest_to_mean < 2, try_name
def test_bh_match_exact():
# check that the ``barnes_hut`` method matches the exact one when
# ``angle = 0`` and ``perplexity > n_samples / 3``
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features).astype(np.float32)
X_embeddeds = {}
n_iter = {}
for method in ['exact', 'barnes_hut']:
tsne = TSNE(n_components=2, method=method, learning_rate=1.0,
init="random", random_state=0, n_iter=251,
perplexity=30.0, angle=0)
# Kill the early_exaggeration
tsne._EXPLORATION_N_ITER = 0
X_embeddeds[method] = tsne.fit_transform(X)
n_iter[method] = tsne.n_iter_
assert n_iter['exact'] == n_iter['barnes_hut']
assert_allclose(X_embeddeds['exact'], X_embeddeds['barnes_hut'], rtol=1e-4)
def test_gradient_bh_multithread_match_sequential():
# check that the bh gradient with different num_threads gives the same
# results
n_features = 10
n_samples = 30
n_components = 2
degrees_of_freedom = 1
angle = 3
perplexity = 5
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features).astype(np.float32)
params = random_state.randn(n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
n_neighbors=n_neighbors, mode='distance')
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_sequential, grad_sequential = _kl_divergence_bh(
params, P_bh, degrees_of_freedom, n_samples, n_components,
angle=angle, skip_num_points=0, verbose=0, num_threads=1)
for num_threads in [2, 4]:
kl_multithread, grad_multithread = _kl_divergence_bh(
params, P_bh, degrees_of_freedom, n_samples, n_components,
angle=angle, skip_num_points=0, verbose=0, num_threads=num_threads)
assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)
assert_allclose(grad_multithread, grad_sequential)
def test_tsne_with_different_distance_metrics():
"""Make sure that TSNE works for different distance metrics"""
random_state = check_random_state(0)
n_components_original = 3
n_components_embedding = 2
X = random_state.randn(50, n_components_original).astype(np.float32)
metrics = ['manhattan', 'cosine']
dist_funcs = [manhattan_distances, cosine_distances]
for metric, dist_func in zip(metrics, dist_funcs):
X_transformed_tsne = TSNE(
metric=metric, n_components=n_components_embedding,
random_state=0, n_iter=300).fit_transform(X)
X_transformed_tsne_precomputed = TSNE(
metric='precomputed', n_components=n_components_embedding,
random_state=0, n_iter=300).fit_transform(dist_func(X))
assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_tsne_n_jobs(method):
"""Make sure that the n_jobs parameter doesn't impact the output"""
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features)
X_tr_ref = TSNE(n_components=2, method=method, perplexity=30.0,
angle=0, n_jobs=1, random_state=0).fit_transform(X)
X_tr = TSNE(n_components=2, method=method, perplexity=30.0,
angle=0, n_jobs=2, random_state=0).fit_transform(X)
assert_allclose(X_tr_ref, X_tr)
| bsd-3-clause
lgbouma/astrobase | astrobase/lcproc/catalogs.py | 2 | 57723 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# catalogs.py - Waqas Bhatti ([email protected]) - Feb 2019
'''
This contains functions to generate light curve catalogs from collections of
light curves.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os
import os.path
import glob
import shutil
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import numpy.random as npr
npr.seed(0xc0ffee)
import scipy.spatial as sps
import astropy.io.fits as pyfits
from astropy.wcs import WCS
from astropy.visualization import ZScaleInterval, LinearStretch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from tqdm import tqdm
TQDM = True
except Exception:
TQDM = False
pass
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def _dict_get(datadict, keylist):
return reduce(getitem, keylist, datadict)
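# A minimal usage sketch of _dict_get (the lcdict layout below is hypothetical,
# for illustration only):
#
#   lcdict = {'objectid': 'OBJ-1', 'objectinfo': {'ra': 120.5, 'decl': -45.2}}
#   _dict_get(lcdict, ['objectinfo', 'ra'])    # -> 120.5
#   _dict_get(lcdict, 'objectid'.split('.'))   # -> 'OBJ-1'
#
# This mirrors the dotted-key addressing used by the `columns` kwarg of
# make_lclist below, e.g. 'objectinfo.ra' -> lcdict['objectinfo']['ra']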
############
## CONFIG ##
############
NCPUS = mp.cpu_count()
# these translate filter operators given as strings to Python operators
FILTEROPS = {'eq':'==',
'gt':'>',
'ge':'>=',
'lt':'<',
'le':'<=',
'ne':'!='}
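# A sketch of how these operators are used (the column name 'ndet' is
# illustrative): a `columnfilters` spec such as 'ndet|gt|1000' passed to
# filter_lclist below is split on '|', FILTEROPS['gt'] -> '>' is substituted,
# and the resulting expression
# "np.isfinite(lclist['objects']['ndet']) & (lclist['objects']['ndet'] > 1000)"
# is evaluated against the catalog columns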
###################
## LOCAL IMPORTS ##
###################
from astrobase.plotbase import fits_finder_chart
from astrobase.cpserver.checkplotlist import checkplot_infokey_worker
from astrobase.lcproc import get_lcformat
#####################################################
## FUNCTIONS TO GENERATE OBJECT CATALOGS (LCLISTS) ##
#####################################################
def _lclist_parallel_worker(task):
'''This is a parallel worker for makelclist.
Parameters
----------
task : tuple
This is a tuple containing the following items:
task[0] = lcf
task[1] = columns
task[2] = lcformat
task[3] = lcformatdir
task[4] = lcndetkey
Returns
-------
dict or None
This contains all of the info for the object processed in this LC read
operation. If this fails, returns None
'''
lcf, columns, lcformat, lcformatdir, lcndetkey = task
# get the bits needed for lcformat handling
# NOTE: we re-import things in this worker function because sometimes
# functions can't be pickled correctly for passing them to worker functions
# in a processing pool
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# we store the full path of the light curve
lcobjdict = {'lcfname':os.path.abspath(lcf)}
try:
# read the light curve in
lcdict = readerfunc(lcf)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# insert all of the columns
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
try:
thiscolval = _dict_get(lcdict, getkey)
except Exception:
LOGWARNING('column %s does not exist for %s' %
(colkey, lcf))
thiscolval = np.nan
# update the lcobjdict with this value
lcobjdict[getkey[-1]] = thiscolval
except Exception:
LOGEXCEPTION('could not figure out columns for %s' % lcf)
# insert all of the columns as nans
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
thiscolval = np.nan
# update the lclistdict with this value
lcobjdict[getkey[-1]] = thiscolval
# now get the actual ndets; this excludes nans and infs
for dk in lcndetkey:
try:
if '.' in dk:
getdk = dk.split('.')
else:
getdk = [dk]
ndetcol = _dict_get(lcdict, getdk)
actualndets = ndetcol[np.isfinite(ndetcol)].size
lcobjdict['%s.ndet' % getdk[-1]] = actualndets
except Exception:
lcobjdict['%s.ndet' % getdk[-1]] = np.nan
return lcobjdict
def make_lclist(basedir,
outfile,
use_list_of_filenames=None,
lcformat='hat-sql',
lcformatdir=None,
fileglob=None,
recursive=True,
columns=('objectid',
'objectinfo.ra',
'objectinfo.decl',
'objectinfo.ndet'),
makecoordindex=('objectinfo.ra','objectinfo.decl'),
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
maxlcs=None,
nworkers=NCPUS):
'''This generates a light curve catalog for all light curves in a directory.
Given a base directory where all the files are, and a light curve format,
this will find all light curves, pull out the keys in each lcdict requested
in the `columns` kwarg for each object, and write them to the requested
output pickle file. These keys should be pointers to scalar values
(i.e. something like `objectinfo.ra` is OK, but something like 'times' won't
work because it's a vector).
Generally, this works with light curve reading functions that produce
lcdicts as detailed in the docstring for `lcproc.register_lcformat`. Once
you've registered your light curve reader functions using the
`lcproc.register_lcformat` function, pass in the `formatkey` associated with
your light curve format, and this function will be able to read all light
curves in that format as well as the object information stored in their
`objectinfo` dict.
Parameters
----------
basedir : str or list of str
If this is a str, points to a single directory to search for light
curves. If this is a list of str, it must be a list of directories to
search for light curves. All of these will be searched to find light
curve files matching either your light curve format's default fileglob
(when you registered your LC format), or a specific fileglob that you
can pass in using the `fileglob` kwarg here. If the `recursive` kwarg
is set, the provided directories will be searched recursively.
If `use_list_of_filenames` is not None, it will override this argument
and the function will take those light curves as the list of files it
must process instead of whatever is specified in `basedir`.
outfile : str
This is the name of the output file to write. This will be a pickle
file, so a good convention to use for this name is something like
'my-lightcurve-catalog.pkl'.
use_list_of_filenames : list of str or None
Use this kwarg to override whatever is provided in `basedir` and
directly pass in a list of light curve files to process. This can speed
up this function by a lot because no searches on disk will be performed
to find light curve files matching `basedir` and `fileglob`.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
fileglob : str or None
If provided, is a string that is a valid UNIX filename glob. Used to
override the default fileglob for this LC format when searching for
light curve files in `basedir`.
recursive : bool
If True, the directories specified in `basedir` will be searched
recursively for all light curve files that match the default fileglob
for this LC format or a specific one provided in `fileglob`.
columns : list of str
This is a list of keys in the lcdict produced by your light curve reader
function that contain object information, which will be extracted and
put into the output light curve catalog. It's highly recommended that
your LC reader function produce a lcdict that contains at least the
default keys shown here.
The lcdict keys to extract are specified by using an address scheme:
- First level dict keys can be specified directly:
e.g., 'objectid' will extract lcdict['objectid']
- Keys at other levels can be specified by using a period to indicate
the level:
- e.g., 'objectinfo.ra' will extract lcdict['objectinfo']['ra']
- e.g., 'objectinfo.varinfo.features.stetsonj' will extract
lcdict['objectinfo']['varinfo']['features']['stetsonj']
makecoordindex : list of two str or None
This is used to specify which lcdict keys contain the right ascension
and declination coordinates for this object. If these are provided, the
output light curve catalog will have a kdtree built on all object
coordinates, which enables fast spatial searches and cross-matching to
external catalogs by `checkplot` and `lcproc` functions.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolors` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This is
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is a usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
maxlcs : int or None
This sets how many light curves to process in the input LC list
generated by searching for LCs in `basedir` or in the list provided as
`use_list_of_filenames`.
nworkers : int
This sets the number of parallel workers to launch to collect
information from the light curves.
Returns
-------
str
Returns the path to the generated light curve catalog pickle file.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# this is to get the actual ndet
# set to the magnitudes column
lcndetkey = dmagcols
if isinstance(use_list_of_filenames, list):
matching = use_list_of_filenames
else:
# handle the case where basedir is a list of directories
if isinstance(basedir, list):
matching = []
for bdir in basedir:
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat,
bdir))
if recursive is False:
matching.extend(glob.glob(os.path.join(bdir, fileglob)))
else:
matching.extend(glob.glob(os.path.join(bdir,
'**',
fileglob),
recursive=True))
# otherwise, handle the usual case of one basedir to search in
else:
# now find the files
LOGINFO('searching for %s light curves in %s ...' %
(lcformat, basedir))
if recursive is False:
matching = glob.glob(os.path.join(basedir, fileglob))
else:
matching = glob.glob(os.path.join(basedir,
'**',
fileglob),recursive=True)
#
# now that we have all the files, process them
#
if matching and len(matching) > 0:
LOGINFO('found %s light curves' % len(matching))
# cut down matching to maxlcs
if maxlcs:
matching = matching[:maxlcs]
# prepare the output dict
lclistdict = {
'basedir':basedir,
'lcformat':lcformat,
'fileglob':fileglob,
'recursive':recursive,
'columns':columns,
'makecoordindex':makecoordindex,
'nfiles':len(matching),
'objects': {
}
}
# columns that will always be present in the output lclistdict
derefcols = ['lcfname']
derefcols.extend(['%s.ndet' % x.split('.')[-1] for x in lcndetkey])
for dc in derefcols:
lclistdict['objects'][dc] = []
# fill in the rest of the lclist columns from the columns kwarg
for col in columns:
# dereference the column
thiscol = col.split('.')
thiscol = thiscol[-1]
lclistdict['objects'][thiscol] = []
derefcols.append(thiscol)
# start collecting info
LOGINFO('collecting light curve info...')
tasks = [(x, columns, lcformat, lcformatdir, lcndetkey)
for x in matching]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
results = executor.map(_lclist_parallel_worker, tasks)
results = list(results)
# update the columns in the overall dict from the results of the
# parallel map
for result in results:
for xcol in derefcols:
lclistdict['objects'][xcol].append(result[xcol])
executor.shutdown()
# done with collecting info
# turn all of the lists in the lclistdict into arrays
for col in lclistdict['objects']:
lclistdict['objects'][col] = np.array(lclistdict['objects'][col])
# handle duplicate objectids with different light curves
uniques, counts = np.unique(lclistdict['objects']['objectid'],
return_counts=True)
duplicated_objectids = uniques[counts > 1]
if duplicated_objectids.size > 0:
# redo the objectid array so it has a bit larger dtype so the extra
# tag can fit into the field
dt = lclistdict['objects']['objectid'].dtype.str
dt = '<U%s' % (
int(dt.replace('<','').replace('U','').replace('S','')) + 3
)
lclistdict['objects']['objectid'] = np.array(
lclistdict['objects']['objectid'],
dtype=dt
)
for objid in duplicated_objectids:
objid_inds = np.where(
lclistdict['objects']['objectid'] == objid
)
# mark the duplicates, assume the first instance is the actual
# one
for ncounter, nind in enumerate(objid_inds[0][1:]):
lclistdict['objects']['objectid'][nind] = '%s-%s' % (
lclistdict['objects']['objectid'][nind],
ncounter+2
)
LOGWARNING(
'tagging duplicated instance %s of objectid: '
'%s as %s-%s, lightcurve: %s' %
(ncounter+2, objid, objid, ncounter+2,
lclistdict['objects']['lcfname'][nind])
)
# if we're supposed to make a spatial index, do so
if (makecoordindex and
isinstance(makecoordindex, (list, tuple)) and
len(makecoordindex) == 2):
try:
# deref the column names
racol, declcol = makecoordindex
racol = racol.split('.')[-1]
declcol = declcol.split('.')[-1]
# get the ras and decls
objra, objdecl = (lclistdict['objects'][racol],
lclistdict['objects'][declcol])
# get the xyz unit vectors from ra,decl
# since i had to remind myself:
# https://en.wikipedia.org/wiki/Equatorial_coordinate_system
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))
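# i.e. (x, y, z) = (cos(ra)*cos(decl), sin(ra)*cos(decl), sin(decl)), the
# unit vector on the celestial sphere for each (ra, decl) pair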
# generate the kdtree
kdt = sps.cKDTree(xyz,copy_data=True)
# put the tree into the dict
lclistdict['kdtree'] = kdt
LOGINFO('kdtree generated for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
except Exception:
LOGEXCEPTION('could not make kdtree for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
raise
# generate the xy pairs if fieldfits is not None
if field_fitsfile and os.path.exists(field_fitsfile):
# read in the FITS file
if field_wcsfrom is None:
hdulist = pyfits.open(field_fitsfile)
hdr = hdulist[0].header
hdulist.close()
w = WCS(hdr)
wcsok = True
elif os.path.exists(field_wcsfrom):
w = WCS(field_wcsfrom)
wcsok = True
else:
LOGERROR('could not determine WCS info for input FITS: %s' %
field_fitsfile)
wcsok = False
if wcsok:
# first, transform the ra/decl to x/y and put these in the
# lclist output dict
radecl = np.column_stack((objra, objdecl))
lclistdict['objects']['framexy'] = w.all_world2pix(
radecl,
1
)
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(outfile),
os.path.splitext(os.path.basename(outfile))[0] + '.png'
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=objra,
overlay_decl=objdecl,
overlay_pltopts=field_pltopts,
overlay_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this LC list: %s' % finder_png)
# write the pickle
with open(outfile,'wb') as outfd:
pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
LOGINFO('done. LC info -> %s' % outfile)
return outfile
else:
LOGERROR('no files found in %s matching %s' % (basedir, fileglob))
return None
def filter_lclist(lc_catalog,
objectidcol='objectid',
racol='ra',
declcol='decl',
xmatchexternal=None,
xmatchdistarcsec=3.0,
externalcolnums=(0,1,2),
externalcolnames=('objectid','ra','decl'),
externalcoldtypes='U20,f8,f8',
externalcolsep=None,
externalcommentchar='#',
conesearch=None,
conesearchworkers=1,
columnfilters=None,
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
copylcsto=None):
'''This is used to perform cone-search, cross-match, and column-filter
operations on a light curve catalog generated by `make_lclist`.
Uses the output of `make_lclist` above. This function returns a list of
light curves matching various criteria specified by the `xmatchexternal`,
`conesearch`, and `columnfilters` kwargs. Use this function to generate
input lists for other lcproc functions,
e.g. `lcproc.lcvfeatures.parallel_varfeatures`,
`lcproc.periodfinding.parallel_pf`, and `lcproc.lcbin.parallel_timebin`,
among others.
The operations are applied in this order if more than one is specified:
`xmatchexternal` -> `conesearch` -> `columnfilters`. All results from these
operations are joined using a logical AND operation.
Parameters
----------
lc_catalog : str
This is the path to the light curve catalog pickle file produced by
`make_lclist`.
objectidcol : str
This is the name of the object ID column in the light curve catalog.
racol : str
This is the name of the RA column in the light curve catalog.
declcol : str
This is the name of the Dec column in the light curve catalog.
xmatchexternal : str or None
If provided, this is the filename of a text file containing objectids,
ras and decs to match the objects in the light curve catalog to by their
positions.
xmatchdistarcsec : float
This is the distance in arcseconds to use when cross-matching to the
external catalog in `xmatchexternal`.
externalcolnums : sequence of int
This is a list of the zero-indexed column numbers of columns to extract
from the external catalog file.
externalcolnames : sequence of str
This is a list of names of columns that will be extracted from the
external catalog file. This is the same length as
`externalcolnums`. These must contain the names provided as the
`objectid`, `ra`, and `decl` column names so this function knows which
column numbers correspond to those columns and can use them to set up
the cross-match.
externalcoldtypes : str
This is a CSV string containing numpy dtype definitions for all columns
listed to extract from the external catalog file. The number of dtype
definitions should be equal to the number of columns to extract.
externalcolsep : str or None
The column separator to use when extracting columns from the external
catalog file. If None, any whitespace between columns is used as the
separator.
externalcommentchar : str
The character indicating that a line in the external catalog file is to
be ignored.
conesearch : list of float
This is used to specify cone-search parameters. It should be a three
element list:
[center_ra_deg, center_decl_deg, search_radius_deg]
conesearchworkers : int
The number of parallel workers to launch for the cone-search operation.
columnfilters : list of str
This is a list of strings indicating any filters to apply on each column
in the light curve catalog. All column filters are applied in the
specified sequence and are combined with a logical AND operator. The
format of each filter string should be:
'<lc_catalog column>|<operator>|<operand>'
where:
- <lc_catalog column> is a column in the lc_catalog pickle file
- <operator> is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which
correspond to the usual operators: <, >, <=, >=, ==, != respectively.
- <operand> is a float, int, or string.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolors` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This is
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is a usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
copylcsto : str
If this is provided, it is interpreted as a target directory to which all
the light curves matching the specified conditions will be copied.
Returns
-------
tuple
Returns a two elem tuple: (matching_object_lcfiles, matching_objectids)
if conesearch and/or column filters are used. If `xmatchexternal` is
also used, a three-elem tuple is returned: (matching_object_lcfiles,
matching_objectids, extcat_matched_objectids).
'''
with open(lc_catalog,'rb') as infd:
lclist = pickle.load(infd)
# generate numpy arrays of the matching object indexes. we do it this way so
# we can AND everything at the end, instead of having to look up the objects
at these indices and run the columnfilter on them
xmatch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=bool)
conesearch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=bool)
# do the xmatch first
ext_matches = []
ext_matching_objects = []
if (xmatchexternal and
isinstance(xmatchexternal, str) and
os.path.exists(xmatchexternal)):
try:
# read in the external file
extcat = np.genfromtxt(xmatchexternal,
usecols=externalcolnums,
delimiter=externalcolsep,
names=externalcolnames,
dtype=externalcoldtypes,
comments=externalcommentchar)
ext_cosdecl = np.cos(np.radians(extcat['decl']))
ext_sindecl = np.sin(np.radians(extcat['decl']))
ext_cosra = np.cos(np.radians(extcat['ra']))
ext_sinra = np.sin(np.radians(extcat['ra']))
ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
ext_sinra*ext_cosdecl,
ext_sindecl))
ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0)
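# this is the chord length on the unit sphere subtending an angle equal to
# xmatchdistarcsec: chord = 2 * sin(theta / 2); comparing chord lengths in
# the kdtree query below is equivalent to comparing angular separations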
# get our kdtree
our_kdt = lclist['kdtree']
# get the external kdtree
ext_kdt = sps.cKDTree(ext_xyz)
# do a query_ball_tree
extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)
for extind, mind in enumerate(extkd_matchinds):
if len(mind) > 0:
ext_matches.append(mind[0])
# get the whole matching row for the ext objects recarray
ext_matching_objects.append(extcat[extind])
ext_matches = np.array(ext_matches)
if ext_matches.size > 0:
# update the xmatch_matching_index
xmatch_matching_index[ext_matches] = True
LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' %
(xmatchexternal, xmatchdistarcsec, ext_matches.size))
else:
LOGERROR("xmatch: no objects were cross-matched to external "
"catalog spec: %s, can't continue" % xmatchexternal)
return None, None, None
except Exception:
LOGEXCEPTION('could not match to external catalog spec: %s' %
repr(xmatchexternal))
raise
# do the cone search next
if (conesearch and
isinstance(conesearch, (list, tuple)) and
len(conesearch) == 3):
try:
racenter, declcenter, searchradius = conesearch
cosdecl = np.cos(np.radians(declcenter))
sindecl = np.sin(np.radians(declcenter))
cosra = np.cos(np.radians(racenter))
sinra = np.sin(np.radians(racenter))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0)
# get the kdtree
our_kdt = lclist['kdtree']
# look up the coordinates
kdtindices = our_kdt.query_ball_point([cosra*cosdecl,
sinra*cosdecl,
sindecl],
xyzdist,
n_jobs=conesearchworkers)
if kdtindices and len(kdtindices) > 0:
LOGINFO('cone search: objects within %.4f deg '
'of (%.3f, %.3f): %s' %
(searchradius, racenter, declcenter, len(kdtindices)))
# update the conesearch_matching_index
matchingind = kdtindices
conesearch_matching_index[np.array(matchingind)] = True
# we fail immediately if we found nothing. this assumes the user
# cares more about the cone-search than the regular column filters
else:
LOGERROR("cone-search: no objects were found within "
"%.4f deg of (%.3f, %.3f): %s, can't continue" %
(searchradius, racenter, declcenter, len(kdtindices)))
return None, None
except Exception:
LOGEXCEPTION('cone-search: could not run a cone-search, '
'is there a kdtree present in %s?' % lc_catalog)
raise
# now that we're done with cone-search, do the column filtering
allfilterinds = []
if columnfilters and isinstance(columnfilters, list):
# go through each filter
for cfilt in columnfilters:
try:
fcol, foperator, foperand = cfilt.split('|')
foperator = FILTEROPS[foperator]
# generate the evalstring
filterstr = (
"np.isfinite(lclist['objects']['%s']) & "
"(lclist['objects']['%s'] %s %s)"
) % (fcol, fcol, foperator, foperand)
filterind = eval(filterstr)
ngood = lclist['objects'][objectidcol][filterind].size
LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood))
allfilterinds.append(filterind)
except Exception:
LOGEXCEPTION('filter: could not understand filter spec: %s'
% cfilt)
LOGWARNING('filter: not applying this broken filter')
# now that we have all the filter indices good to go
# logical-AND all the things
# make sure we only do filtering if we were told to do so
if (xmatchexternal or conesearch or columnfilters):
filterstack = []
if xmatchexternal:
filterstack.append(xmatch_matching_index)
if conesearch:
filterstack.append(conesearch_matching_index)
if columnfilters:
filterstack.extend(allfilterinds)
finalfilterind = np.column_stack(filterstack)
finalfilterind = np.all(finalfilterind, axis=1)
# get the filtered object light curves and object names
filteredobjectids = lclist['objects'][objectidcol][finalfilterind]
filteredlcfnames = lclist['objects']['lcfname'][finalfilterind]
else:
filteredobjectids = lclist['objects'][objectidcol]
filteredlcfnames = lclist['objects']['lcfname']
# no filters applied: the finder chart below should overlay all objects
finalfilterind = np.full(filteredobjectids.size, True, dtype=bool)
# if we're told to make a finder chart with the selected objects
if field_fitsfile is not None and os.path.exists(field_fitsfile):
# get the RA and DEC of the matching objects
matching_ra = lclist['objects'][racol][finalfilterind]
matching_decl = lclist['objects'][declcol][finalfilterind]
matching_postfix = []
if xmatchexternal is not None:
matching_postfix.append(
'xmatch_%s' %
os.path.splitext(os.path.basename(xmatchexternal))[0]
)
if conesearch is not None:
matching_postfix.append('conesearch_RA%.3f_DEC%.3f_RAD%.5f' %
tuple(conesearch))
if columnfilters is not None:
for cfi, cf in enumerate(columnfilters):
if cfi == 0:
matching_postfix.append('filter_%s_%s_%s' %
tuple(cf.split('|')))
else:
matching_postfix.append('_and_%s_%s_%s' %
tuple(cf.split('|')))
if len(matching_postfix) > 0:
matching_postfix = '-%s' % '_'.join(matching_postfix)
else:
matching_postfix = ''
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(lc_catalog),
'%s%s.png' %
(os.path.splitext(os.path.basename(lc_catalog))[0],
matching_postfix)
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=matching_ra,
overlay_decl=matching_decl,
overlay_pltopts=field_pltopts,
field_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this filtered LC list: %s' % finder_png)
# if copylcsto is not None, copy LCs over to it
if copylcsto is not None:
if not os.path.exists(copylcsto):
os.mkdir(copylcsto)
if TQDM:
lciter = tqdm(filteredlcfnames)
else:
lciter = filteredlcfnames
LOGINFO('copying matching light curves to %s' % copylcsto)
for lc in lciter:
shutil.copy(lc, copylcsto)
LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size)
if xmatchexternal and len(ext_matching_objects) > 0:
return filteredlcfnames, filteredobjectids, ext_matching_objects
else:
return filteredlcfnames, filteredobjectids
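# --- illustrative sketch, not part of the original module ---
# A minimal example of the spherical-to-Cartesian conversion used by the
# cross-match and cone-search blocks in the function above: RA/Dec become a
# unit vector, and an angular search radius becomes the equivalent chord
# length 2*sin(theta/2) for the kd-tree lookup. The helper name and its
# arcsec-based radius argument are assumptions made for this example only.
def _example_radec_to_xyz_and_chord(ra_deg, decl_deg, radius_arcsec):
    '''Return the (x, y, z) unit vector and the kd-tree chord distance.'''
    cosdecl = np.cos(np.radians(decl_deg))
    sindecl = np.sin(np.radians(decl_deg))
    cosra = np.cos(np.radians(ra_deg))
    sinra = np.sin(np.radians(ra_deg))
    # unit vector on the celestial sphere
    xyz = (cosra*cosdecl, sinra*cosdecl, sindecl)
    # chord length corresponding to an angular radius given in arcsec
    chord = 2.0 * np.sin(np.radians(radius_arcsec/3600.0)/2.0)
    return xyz, chord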
############################################################
## ADDING CHECKPLOT INFO BACK TO THE LIGHT CURVE CATALOGS ##
############################################################
def _cpinfo_key_worker(task):
'''This wraps `checkplotlist.checkplot_infokey_worker`.
This is used to get the correct dtype for each element in the retrieved results.
Parameters
----------
task : tuple
task[0] = cpfile
task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)
Returns
-------
dict
All of the requested keys from the checkplot are returned along with
their values in a dict.
'''
cpfile, keyspeclist = task
keystoget = [x[0] for x in keyspeclist]
nonesubs = [x[-2] for x in keyspeclist]
nansubs = [x[-1] for x in keyspeclist]
# reform the keystoget into a list of lists
for i, k in enumerate(keystoget):
thisk = k.split('.')
thisk = [(int(x) if x.isdecimal() else x) for x in thisk]
keystoget[i] = thisk
# add in the objectid as well to match to the object catalog later
keystoget.insert(0,['objectid'])
nonesubs.insert(0, '')
nansubs.insert(0,'')
# get all the keys we need
vals = checkplot_infokey_worker((cpfile, keystoget))
# if they have some Nones, nans, etc., reform them as expected
for val, nonesub, nansub, valind in zip(vals, nonesubs,
nansubs, range(len(vals))):
if val is None:
outval = nonesub
elif isinstance(val, float) and not np.isfinite(val):
outval = nansub
elif isinstance(val, (list, tuple)):
outval = ', '.join(val)
else:
outval = val
vals[valind] = outval
return vals
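# --- illustrative sketch, not part of the original module ---
# This shows how the dotted key specs handled by `_cpinfo_key_worker` above
# (e.g. 'objectinfo.gaia_ids.0') map onto nested dict/list lookups in a
# checkplot dict. `_example_resolve_dotted_key` is a hypothetical helper
# written only for illustration.
def _example_resolve_dotted_key(cpdict, dottedkey):
    '''Resolve a dotted key like 'objectinfo.gaia_ids.0' against `cpdict`.'''
    keypath = [(int(x) if x.isdecimal() else x) for x in dottedkey.split('.')]
    val = cpdict
    for k in keypath:
        # integer path elements index into lists, strings index into dicts
        val = val[k]
    return val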
CPINFO_DEFAULTKEYS = [
# key, dtype, first level, overwrite=T|append=F, None sub, nan sub
('comments',
np.unicode_, False, True, '', ''),
('objectinfo.objecttags',
np.unicode_, True, True, '', ''),
('objectinfo.twomassid',
np.unicode_, True, True, '', ''),
('objectinfo.bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.color_classes',
np.unicode_, True, True, '', ''),
('objectinfo.pmra',
np.float_, True, True, np.nan, np.nan),
('objectinfo.pmdecl',
np.float_, True, True, np.nan, np.nan),
('objectinfo.propermotion',
np.float_, True, True, np.nan, np.nan),
('objectinfo.rpmj',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gl',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gb',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_status',
np.unicode_, True, True, '', ''),
('objectinfo.gaia_ids.0',
np.unicode_, True, True, '', ''),
('objectinfo.gaiamag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_parallax',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_parallax_err',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_absmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.simbad_best_mainid',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_objtype',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_allids',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_distarcsec',
np.float_, True, True, np.nan, np.nan),
#
# TIC info
#
('objectinfo.ticid',
np.unicode_, True, True, '', ''),
('objectinfo.tic_version',
np.unicode_, True, True, '', ''),
('objectinfo.tessmag',
np.float_, True, True, np.nan, np.nan),
#
# variability info
#
('varinfo.vartags',
np.unicode_, False, True, '', ''),
('varinfo.varperiod',
np.float_, False, True, np.nan, np.nan),
('varinfo.varepoch',
np.float_, False, True, np.nan, np.nan),
('varinfo.varisperiodic',
np.int_, False, True, 0, 0),
('varinfo.objectisvar',
np.int_, False, True, 0, 0),
('varinfo.features.median',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.mad',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stdev',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.mag_iqr',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.skew',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.kurtosis',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stetsonj',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stetsonk',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.eta_normal',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.linear_fit_slope',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.magnitude_ratio',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.beyond1std',
np.float_, False, True, np.nan, np.nan)
]
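# --- illustrative sketch, not part of the original module ---
# A hypothetical example of extending the default key list: each entry follows
# the (key, dtype, first-level?, overwrite?, None-substitute, nan-substitute)
# convention noted at the top of CPINFO_DEFAULTKEYS. The 'objectinfo.teff'
# key is used only as an illustration and is not guaranteed to exist in any
# given checkplot.
CPINFO_EXAMPLE_EXTRAKEYS = CPINFO_DEFAULTKEYS + [
    ('objectinfo.teff',
     np.float_, True, True, np.nan, np.nan),
]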
def add_cpinfo_to_lclist(
checkplots, # list or a directory path
initial_lc_catalog,
magcol, # to indicate checkplot magcol
outfile,
checkplotglob='checkplot*.pkl*',
infokeys=CPINFO_DEFAULTKEYS,
nworkers=NCPUS
):
'''This adds checkplot info to the initial light curve catalogs generated by
`make_lclist`.
This is used to incorporate all the extra info checkplots can have for
objects back into columns in the light curve catalog produced by
`make_lclist`. Objects are matched between the checkplots and the light
curve catalog using their `objectid`. This then allows one to search this
'augmented' light curve catalog by these extra columns. The 'augmented'
light curve catalog also forms the basis for the search interface provided
by the LCC-Server.
The default list of keys that will be extracted from a checkplot and added
as columns in the initial light curve catalog is listed above in the
`CPINFO_DEFAULTKEYS` list.
Parameters
----------
checkplots : str or list
If this is a str, it is interpreted as a directory which will be searched
for checkplot pickle files using `checkplotglob`. If this is a list, it
will be interpreted as a list of checkplot pickle files to process.
initial_lc_catalog : str
This is the path to the light curve catalog pickle made by
`make_lclist`.
magcol : str
This is used to indicate the light curve magnitude column to extract
magnitude column specific information. For example, Stetson variability
indices can be generated using magnitude measurements in separate
photometric apertures, which appear in separate `magcols` in the
checkplot. To associate each such feature of the object with its
specific `magcol`, pass that `magcol` in here. This `magcol` will then
be added as a prefix to the resulting column in the 'augmented' LC
catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and
`magcol2_stetsonj` for two separate magcols.
outfile : str
This is the file name of the output 'augmented' light curve catalog
pickle file that will be written.
infokeys : list of tuples
This is a list of keys to extract from the checkplot and some info on
how this extraction is to be done. Each key entry is a six-element
tuple of the following form:
- key name in the checkplot
- numpy dtype of the value of this key
- True if the key is a first-level key in the checkplot dict (i.e. not
associated with a specific magcol), False otherwise
- True if subsequent updates to the same column name will overwrite the
existing key value in the output augmented light curve catalog, or False
if they will append to it
- value to use to substitute a None value of the key in the
checkplot in the output light curve catalog column
- value to use to substitute a nan value of the key in the
checkplot in the output light curve catalog column
See the `CPINFO_DEFAULTKEYS` list above for examples.
nworkers : int
The number of parallel workers to launch to extract checkplot
information.
Returns
-------
str
Returns the path to the generated 'augmented' light curve catalog pickle
file.
'''
# get the checkplots from the directory if one is provided
if not isinstance(checkplots, list) and os.path.exists(checkplots):
checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))
tasklist = [(cpf, infokeys) for cpf in checkplots]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(_cpinfo_key_worker, tasklist)
results = list(resultfutures)
executor.shutdown()
# now that we have all the checkplot info, we need to match to the
# objectlist in the lclist
# open the lclist
with open(initial_lc_catalog,'rb') as infd:
lc_catalog = pickle.load(infd)
# convert the lc_catalog['columns'] item to a list if it's not
# this is so we can append columns to it later
lc_catalog['columns'] = list(lc_catalog['columns'])
catalog_objectids = np.array(lc_catalog['objects']['objectid'])
checkplot_objectids = np.array([x[0] for x in results])
# add the extra key arrays in the lclist dict
extrainfokeys = []
actualkeys = []
# set up the extrainfokeys list
for keyspec in infokeys:
key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspec
if firstlevel:
eik = key
else:
eik = '%s.%s' % (magcol, key)
extrainfokeys.append(eik)
# now handle the output dicts and column list
eactual = eik.split('.')
# this handles dereferenced list indices
if not eactual[-1].isdigit():
if not firstlevel:
eactual = '.'.join([eactual[0], eactual[-1]])
else:
eactual = eactual[-1]
else:
elastkey = eactual[-2]
# for list columns, this converts stuff like errs -> err,
# and parallaxes -> parallax
if elastkey.endswith('es'):
elastkey = elastkey[:-2]
elif elastkey.endswith('s'):
elastkey = elastkey[:-1]
if not firstlevel:
eactual = '.'.join([eactual[0], elastkey])
else:
eactual = elastkey
actualkeys.append(eactual)
# add a new column only if required
if eactual not in lc_catalog['columns']:
lc_catalog['columns'].append(eactual)
# we'll overwrite earlier existing columns in any case
lc_catalog['objects'][eactual] = []
# now go through each objectid in the catalog and add the extra keys to
# their respective arrays
for catobj in tqdm(catalog_objectids):
cp_objind = np.where(checkplot_objectids == catobj)
if len(cp_objind[0]) > 0:
# get the info line for this checkplot
thiscpinfo = results[cp_objind[0][0]]
# the first element is the objectid which we remove
thiscpinfo = thiscpinfo[1:]
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
# add the actual thing to the output list
lc_catalog['objects'][ek].append(
thiscpinfo[ekind]
)
else:
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
thiskeyspec = infokeys[ekind]
nonesub = thiskeyspec[-2]
lc_catalog['objects'][ek].append(
nonesub
)
# now we should have all the new keys in the object catalog
# turn them into arrays
for ek in actualkeys:
lc_catalog['objects'][ek] = np.array(
lc_catalog['objects'][ek]
)
# add the magcol to the lc_catalog
if 'magcols' in lc_catalog:
if magcol not in lc_catalog['magcols']:
lc_catalog['magcols'].append(magcol)
else:
lc_catalog['magcols'] = [magcol]
# write back the new object catalog
with open(outfile, 'wb') as outfd:
pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return outfile
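# --- illustrative usage sketch, not part of the original module ---
# A minimal example of calling `add_cpinfo_to_lclist` above. The paths and the
# 'aep_000' magcol are hypothetical placeholders; the call is wrapped in a
# function so that importing this file stays side-effect free.
def _example_augment_lc_catalog():
    '''Hypothetical example: fold checkplot info back into an LC catalog.'''
    augmented_catalog = add_cpinfo_to_lclist(
        '/path/to/checkplot-dir',        # searched using checkplotglob
        '/path/to/lclist.pkl',           # catalog made earlier by make_lclist
        'aep_000',                       # magcol whose features we want
        '/path/to/lclist-augmented.pkl'  # output 'augmented' catalog pickle
    )
    return augmented_catalog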
|
mit
|
liyu1990/sklearn
|
sklearn/tests/test_cross_validation.py
|
20
|
46586
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups' segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that the StratifiedShuffleSplit indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def test_train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the zero/one
# score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
bsd-3-clause
|
New-College-of-Florida/Jonathan-Niles-Thesis
|
code/genes/preprocess.py
|
1
|
3749
|
#!/usr/bin/env python
"""
preprocess.py
Preprocesses the raw expression values from Affy U133 Plus 2.0 arrays and
aligns them with the gene dataset from BioMart.
Copyright (C) 2015 Jonathan Niles
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import numpy as np
import nutils as nu
from matplotlib.mlab import csv2rec, rec2csv
from numpy.lib.recfunctions import append_fields
def transformData():
"""preprocessing"""
names = [
'name', # Associated Gene Name
'strand',
'end', # Gene End (bp)
'start', # Gene Start (bp)
'ensemble_id', # Ensembl Gene ID
'affy_id', # Affy HG U133-PLUS-2 probeset
'chrom' # Chromosome Name
]
fname = nu.join(nu.sync, 'data/genes/affy_gene.txt')
print("Loading gene data from", fname)
genes = csv2rec(fname, delimiter='\t', names=names, skiprows=1)
# add fields for the new data sets
shape, = genes.shape
genes = append_fields(genes, 'hesc_1', np.zeros(shape), usemask=False)
genes = append_fields(genes, 'hesc_2', np.zeros(shape), usemask=False)
genes = append_fields(genes, 'imr90_1', np.zeros(shape), usemask=False)
genes = append_fields(genes, 'imr90_2', np.zeros(shape), usemask=False)
# load data
print("Loading cell line probe sets")
stem = nu.join(nu.sync, 'data/genes/')
hesc_1 = csv2rec(nu.join(stem, 'hesc/GSM1309417.txt'), delimiter='\t')
hesc_2 = csv2rec(nu.join(stem, 'hesc/GSM1309418.txt'), delimiter='\t')
imr90_1 = csv2rec(nu.join(stem, 'imr90/GSM51626.txt'), delimiter='\t')
imr90_2 = csv2rec(nu.join(stem, 'imr90/GSM51627.txt'), delimiter='\t')
print("Sorting probe sets")
id = 'id_ref'
hesc_1.sort(order=id) # speed gains!
hesc_2.sort(order=id)
imr90_1.sort(order=id)
imr90_2.sort(order=id)
print("Assigning probe expression values")
v = 'value' # for conciseness
for i, probe in enumerate(genes['affy_id']):
genes[i]['hesc_1'] = hesc_1[hesc_1[id] == probe][v]
genes[i]['hesc_2'] = hesc_2[hesc_2[id] == probe][v]
genes[i]['imr90_1'] = imr90_1[imr90_1[id] == probe][v]
genes[i]['imr90_2'] = imr90_2[imr90_2[id] == probe][v]
print("Adjusting chromosome labels")
# make sure chromosome labels are strings like 'chr1', not just numbers
dtypes = np.dtype([
('name', 'S28'),
('strand', '<i8'),
('end', '<i8'),
('start', '<i8'),
('ensemble_id', 'S15'),
('affy_id', 'S27'),
('chrom', 'S5'),
('hesc_1', '<f8'),
('hesc_2', '<f8'),
('imr90_1', '<f8'),
('imr90_2', '<f8')
])
genes = genes.astype(dtypes)
genes['chrom'] = np.array(['chr' + c for c in genes['chrom']])
# reorder columns so this fits as a .bed file
cols = ['chrom', 'start', 'end', 'ensemble_id', 'strand', 'name', 'hesc_1', 'hesc_2', 'imr90_1', 'imr90_2']
bed = genes[cols]
fname = nu.join(nu.sync, 'data/genes/expression.bed.gz')
print("Writing new file to", fname)
rec2csv(bed, fname, delimiter='\t')
return
if __name__ == "__main__":
transformData()
|
gpl-3.0
|
iandriver/SCICAST
|
scicast/correlation.py
|
1
|
7909
|
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
import seaborn as sns
from operator import itemgetter
import matplotlib.ticker as ticker
import math
import matplotlib.patches as patches
# run the correlation matrix and save only gene pairs whose correlation magnitude is above the significance threshold
def run_corr(args, df_by_gene, title, path_filename, method_name='pearson', sig_threshold= 0.5, min_period=3, save_corrs=False):
try:
from .dim_reduction import return_top_pca_gene
except (SystemError, ValueError, ImportError):
from dim_reduction import return_top_pca_gene
if len(df_by_gene.columns.tolist())>5000:
df_by_gene, top_pca_list = return_top_pca_gene(args, df_by_gene.transpose(), user_num_genes=5000)
if method_name != 'kendall':
corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period)
else:
corr_by_gene = df_by_gene.corr(method=method_name)
cor = corr_by_gene
cor.loc[:,:] = np.tril(cor.values, k=-1)
cor = cor.stack()
corr_by_gene_pos = cor[cor >=sig_threshold]
corr_by_gene_neg = cor[cor <=(sig_threshold*-1)]
cor_pos_df = pd.DataFrame(corr_by_gene_pos)
cor_neg_df = pd.DataFrame(corr_by_gene_neg)
sig_corr = cor_pos_df.append(cor_neg_df)
sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"])
sig_corrs.to_csv(os.path.join(path_filename, title+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t')
return sig_corrs
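# A minimal sketch of the lower-triangle/stack trick used in run_corr above,
# assuming a tiny hypothetical DataFrame of per-cell gene counts:
#
#   demo = pd.DataFrame({'g1': [1, 2, 3], 'g2': [2, 4, 6], 'g3': [3, 1, 2]})
#   c = demo.corr()
#   c.loc[:, :] = np.tril(c.values, k=-1)  # keep each gene pair only once
#   pairs = c.stack()                      # MultiIndex (gene_a, gene_b) -> r
#   print(pairs[pairs.abs() >= 0.5])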
# find the most-correlated gene groups that do not overlap
def find_top_corrs(terms_to_search, sig_corrs, num_to_return, gene_corr_list = []):
all_corrs_list = []
best_corrs_list = []
for term_to_search in terms_to_search:
corr_tup = [(term_to_search, 1)]
for index, row in sig_corrs.iterrows():
if term_to_search in index:
if index[0]==term_to_search:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
all_corrs_list.append(corr_tup)
all_corrs_list.sort(key=len, reverse=True)
good_count = 0
len_count = 0
corr_genes_seen = []
while good_count <= num_to_return and len_count <= len(all_corrs_list):
for i, corrs in enumerate(all_corrs_list):
len_count+=1
if corrs[0][0] not in corr_genes_seen:
best_corrs_list.append(corrs)
good_count+=1
for g, c in corrs:
if g not in corr_genes_seen and '-' not in str(c):
corr_genes_seen.append(g)
if gene_corr_list != []:
search_corrs = []
for term in gene_corr_list:
corr_tup = [(term, 1)]
for index, row in sig_corrs.iterrows():
if term in index:
if index[0]==term:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
search_corrs.append(corr_tup)
best_corrs_list = search_corrs+best_corrs_list
return best_corrs_list[0:num_to_return+len(gene_corr_list)+1]
else:
return best_corrs_list[0:num_to_return]
# corr_plot finds and plots all correlated genes; sort=True plots the genes in the rank order of the gene searched
def corr_plot(terms_to_search, df_by_gene_corr, args, matrix_data, title ='', sort=True, sig_threshold=0.5):
path_filename = matrix_data.new_filepath
#if there are genes supplied with genes_corr flag process them to a list for correlation search
if args.genes_corr != '':
gene_corr_list = args.genes_corr.split(',')
#otherwise pass an empty list
else:
gene_corr_list = []
size_cells = len(df_by_gene_corr.index.tolist())
figlen=int(size_cells/11)
if figlen < 15:
figlen = 15
ncol = int(figlen/3.2)
if size_cells <100:
sig_threshold = -0.137*math.log(size_cells)+1.1322
sig_corrs = run_corr(args, df_by_gene_corr, title, path_filename, sig_threshold=sig_threshold)
corr_list = find_top_corrs(terms_to_search, sig_corrs, num_to_return=3, gene_corr_list=gene_corr_list)
for corr_tup in corr_list:
term_to_search = corr_tup[0][0]
corr_tup.sort(key=itemgetter(1), reverse=True)
corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation'])
corr_df.to_csv(os.path.join(matrix_data.new_filepath, title+'_Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False)
to_plot = [x[0] for x in corr_tup]
sns.set_palette(sns.cubehelix_palette(len(to_plot), start=1, rot=-.9, reverse=True))
sns.set_context("notebook", font_scale=.9, rc={"lines.linewidth": 1})
try:
sorted_df = df_by_gene_corr.sort_values(by=[term_to_search])
ylabel='Counts (log2)'
if sort:
ax = sorted_df[to_plot].plot(figsize = (figlen,10))
xlabels = sorted_df[to_plot].index.values
else:
ax = df_by_gene_corr[to_plot].plot(figsize = (figlen,10))
xlabels = df_by_gene_corr[to_plot].index.values
ax.set_xlabel('Cell Label')
ax.set_ylabel(ylabel)
ax.set_title('Correlates with '+term_to_search, loc='right')
ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels)))
if matrix_data.cell_label_map:
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
Xcolors = [matrix_data.cell_label_map[cell][0][0] for cell in xlabels]
group_labels = [matrix_data.cell_label_map[cell][0][2] for cell in xlabels]
group_seen = []
leg_handles = []
for xtick, xcolor, group_name in zip(ax.get_xticklabels(which='minor'), Xcolors, group_labels):
xtick.set_color(xcolor)
xtick.set_rotation(90)
if group_name not in group_seen:
leg_handles.append(patches.Patch(color=xcolor, label=group_name))
group_seen.append(group_name)
else:
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
ax.set_ylim([0, df_by_gene_corr[to_plot].values.max()])
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.tick_params(axis='x', which ='minor', labelsize=9)
#scale bbox anchoring to account for number of correlated genes and plot size
if len(corr_tup)>1:
bbox_height = float(1E-13)*pow(len(corr_tup),6) - float(7E-11)*pow(len(corr_tup),5) + float(1E-8)*pow(len(corr_tup),4) - float(8E-7)*pow(len(corr_tup),3) - float(3E-5)*pow(len(corr_tup),2) + 0.0086*len(corr_tup) + 1.0042
else:
bbox_height = 1.05
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
if matrix_data.cell_label_map:
first_legend = ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height+.1), ncol=ncol, prop={'size':10})
ax = plt.gca().add_artist(first_legend)
plt.legend(handles=leg_handles, loc='upper right', bbox_to_anchor=(0.9, bbox_height+.1))
else:
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height), ncol=ncol, prop={'size':10})
fig = plt.gcf()
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03)
plt.savefig(os.path.join(path_filename, title+'_corr_with_'+term_to_search+'.'+args.image_format), bbox_inches='tight')
plt.close('all')
except KeyError:
if args.verbose:
print(term_to_search+' not in this matrix.')
pass
|
mit
|
welch/rasl
|
rasl/application.py
|
1
|
6787
|
# -*- coding: utf-8 -*-
# pylint:disable=invalid-name, too-many-arguments
"""Application
commandline batch alignment published as an entry point in setup.py
"""
from __future__ import division, print_function
import os
from argparse import ArgumentParser
from textwrap import dedent
import skimage.io as skio
from skimage.util import img_as_float
import scipy.io as scio
import numpy as np
try:
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
except ImportError:
plt = None
from .version import __version__
from .tform import (EuclideanTransform, SimilarityTransform, AffineTransform,
ProjectiveTransform)
from .rasl import rasl
def load_images(path, suffixes=('jpg', 'gif', 'png', 'bmp'), points_too=False):
"""load an image set from a directory.
Load all images in a directory as float grayscale. Optionally
if MATLAB 'points' files are present (as in the published RASL
data sets[1]), read and return those. These give coordinates of
corresponding points on the batch of images, e.g., the outside eye
corners in facial images.
Parameters
----------
path : string
file path to image directory
suffixes : list of string
allowable image suffixes
points_too : bool
if true, read and return any "*.mat" files that are present
Returns
-------
images : list of ndarray(h, v)
loaded images as float grayscale arrays
points : list of ndarray(2, 2)
coordinates of eye corner points as columns (only returned if
points_too is True)
References
----------
.. [1] http://perception.csl.illinois.edu/matrix-rank/rasl.html#Code
"""
images = [img_as_float(skio.imread(os.path.join(path, fname), as_grey=True))
for fname in os.listdir(path)
if fname.split('.')[-1] in suffixes]
shapes = np.array([image.shape for image in images])
if np.all(shapes == shapes[0, :]):
print("loaded {} {}x{} images".format(
len(images), images[0].shape[0], images[0].shape[1]))
else:
print("loaded {} images with sizes ranging {},{} -- {},{}".format(
len(images), np.min(shapes[:, 0]), np.min(shapes[:, 1]),
np.max(shapes[:, 0]), np.max(shapes[:, 1])))
if points_too:
points = [scio.loadmat(os.path.join(path, fname))['points']
for fname in os.listdir(path) if fname.endswith('.mat')]
return images, points
else:
return images
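# A minimal usage sketch, assuming a hypothetical directory of same-sized
# images with accompanying MATLAB "points" files:
#
#   images, points = load_images("data/dummy_images", points_too=True)
#   print(len(images), images[0].shape, points[0].shape)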
def rasl_arg_parser(description, path=None, tform=AffineTransform,
grid=(3, 10), frame=5):
"""standard argument parser for RASL utilities that load an image directory
Parameters
----------
description : string
command description
path : string or None
path to image directory. If provided, becomes the default value
for --path. If None, path is a required commandline argument
tform : TForm
default transform type
grid : tuple(2)
shape of image grid to display
frame : real or real(2) or (real(2), real(2))
crop images to specified frame:
pixel-width of boundary (single number), cropped image
size (tuple, centered) or boundary points (minimum and
maximum points) as pixel offsets into the image, values
ranging [0, max-1]. Negative values are subtracted from
the dimension size, as with python array indexing.
Returns
-------
parser : ArgumentParser
configured argument parser
"""
parser = ArgumentParser(description=description)
parser.set_defaults(tform=tform)
tformspec = parser.add_mutually_exclusive_group()
tformspec.add_argument(
"--euclidean", dest='tform', action='store_const',
const=EuclideanTransform,
help="Align using rotation and translation")
tformspec.add_argument(
"--similarity", dest='tform', action='store_const',
const=SimilarityTransform,
help="Align using a similarity transform (rotate, scale, translate)")
tformspec.add_argument(
"--affine", dest='tform', action='store_const', const=AffineTransform,
help="Align using an affine transform (rotate, shear, translate)")
tformspec.add_argument(
"--projective", dest='tform', action='store_const',
const=ProjectiveTransform,
help="Align using a projective transform (affine + perspective)")
parser.add_argument(
"--grid", type=int, nargs=2, default=grid,
help=dedent("""\
image grid shape (rows cols). Note that the entire set of images
is always aligned, even if --grid only displays a subset of them.
%(default)s"""))
parser.add_argument(
"--stop", type=float, default=0.005,
help="halt when objective changes less than this (%(default)s)")
framespec = parser.add_mutually_exclusive_group()
parser.set_defaults(frame=frame)
framespec.add_argument(
"--inset", type=int, dest='frame', help=dedent("""\
inset images by this many pixels to avoid going out
of bounds during alignment (%(default)s)"""))
framespec.add_argument(
"--crop", type=int, nargs=2, dest='frame', help=dedent("""\
crop the image to a specified size, centered. (height, width)
(%(default)s)"""))
framespec.add_argument(
"--bounds", type=int, nargs=4, dest='frame', help=dedent("""\
crop the image to specified min and max points (vmin hmin vmax hmax)
(%(default)s)"""))
parser.add_argument(
"--noise", type=float, default=0,
help="percentage noise to add to images (%(default)s)")
if path:
parser.add_argument(
"--path", default=path,
help="path to directory of images (%(default)s)")
else:
parser.add_argument(
"path", help="path to directory of images (%(default)s)")
return parser
def demo_cmd(description="load and align images in a directory",
path=None, frame=5, grid=(3, 10), tform=AffineTransform):
"""load and align images in a directory, animating the process
Parameters
----------
see rasl_arg_parser
"""
parser = rasl_arg_parser(description=description, path=path, frame=frame,
grid=grid, tform=tform)
args = parser.parse_args()
Image = load_images(args.path)
if len(Image) < np.prod(args.grid):
raise ValueError("Only {} images, specify a smaller --grid than {}"\
.format(len(Image), args.grid))
T = [args.tform().inset(image.shape, args.frame)
for image in Image]
_ = rasl(Image, T, stop_delta=args.stop, show=args.grid)
print("click the image to exit")
plt.waitforbuttonpress()
|
mit
|
walterreade/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
70
|
7486
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
Next, we set alpha and compare the performance of different feature
selection methods, using the area under the precision-recall curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
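# A minimal sketch (with hypothetical demo variables) of why the i.i.d.
# Gaussian case is favourable: the relevant and irrelevant blocks are nearly
# orthogonal, so the mutual incoherence stays well below 1, e.g.
#
#   rng_demo = np.random.RandomState(0)
#   X_demo = rng_demo.normal(size=(1000, 10))
#   print(mutual_incoherence(X_demo[:, :3], X_demo[:, 3:]))  # well below 1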
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
diogo149/CauseEffectPairsChallenge
|
code/random_functions.py
|
2
|
10809
|
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from functools import partial
from multiprocessing import Pool
from utils import binarize, print_current_time, quick_cache
from convert import NumericalToCategorical, CategoricalToNumerical
from decorators import timer
from unary_features import UNARY_FEATURES
from binary_features import BINARY_FEATURES
from regression_machines import REGRESSION_FEATURES
from classification_machines import CLASSIFICATION_FEATURES
def feature_map(func_name, func, list_of_features):
new_features = []
len0 = len(list_of_features[0])
for features in list_of_features:
assert len(features) == len0
for f in zip(*list_of_features):
names, values = zip(*f)
feature_name = names[0]
for n in names:
assert n == feature_name
new_name = "{}_{}".format(feature_name, func_name)
new_features.append((new_name, func(values)))
return new_features
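# A minimal sketch, using hypothetical feature lists, of what feature_map does:
# it pairs up identically named features from several lists and combines their
# values with func.
#
#   f1 = [("skew", 1.0), ("kurtosis", 2.0)]
#   f2 = [("skew", 3.0), ("kurtosis", 4.0)]
#   feature_map("sum", sum, [f1, f2])
#   # -> [("skew_sum", 4.0), ("kurtosis_sum", 6.0)]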
def feature_difference(f1, f2):
def diff(values):
assert len(values) == 2
return values[0] - values[1]
return feature_map("difference", diff, [f1, f2])
def feature_sum(list_of_features):
return feature_map("sum", sum, list_of_features)
def feature_avg(list_of_features):
return feature_map("average", np.mean, list_of_features)
def combine_features(list_of_features):
names = []
values = []
for features in list_of_features:
tmp_names, tmp_values = zip(*features)
names += tmp_names
values += tmp_values
return names, values
def preprend_name(pre, features):
return [("{}_{}".format(pre, name), val) for name, val in features]
def convert_to_categorical(data, data_type):
assert isinstance(data, np.ndarray)
NUM_CATEGORIES = 10
if data_type in ["Binary", "Categorical"]:
return data
elif data_type == "Numerical":
rows = data.shape[0]
new_data = np.zeros(rows)
percentile = step = 100.0 / NUM_CATEGORIES
while percentile < 100.0:
new_data += data > np.percentile(data, percentile)
percentile += step
return new_data
else:
raise Exception
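# A minimal sketch (hypothetical input) of the decile binning above: each
# numerical value is labelled by how many of the 10th..90th percentile
# thresholds it exceeds, giving categories 0..9.
#
#   convert_to_categorical(np.arange(20.0), "Numerical")
#   # -> array([0., 0., 1., 1., 2., 2., ..., 9., 9.])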
def convert_to_numerical(data, data_type):
assert isinstance(data, np.ndarray)
if data_type == "Binary":
return [data]
elif data_type == "Numerical":
ss = StandardScaler()
return [ss.fit_transform(data)]
elif data_type == "Categorical":
binarized = binarize(data)
assert binarized.shape[0] == data.shape[0]
return list(binarized.T)
else:
raise Exception
def preprocess(store):
FEATURES = BINARY_FEATURES + UNARY_FEATURES + REGRESSION_FEATURES + CLASSIFICATION_FEATURES
pool = Pool()
V2_cache = quick_cache("create_V2_cache" + str(store.raw.shape), create_V2_cache, store.raw, pool)
# V2_cache = create_V2_cache(store.raw, pool)
features = []
for feature in FEATURES:
name, func, func_args = feature[0], feature[1], feature[2:]
print(name, end=' ')
print_current_time()
tmp_feature = store.cache(name, feature_creation_V1, pool, store, func, func_args, name)
features.append(tmp_feature)
name2 = "V2_" + name
tmp_feature2 = store.cache(name2, feature_creation_V2, pool, V2_cache, func, func_args, name)
tmp_feature2 = tmp_feature2.rename(columns=lambda x: "V2_" + x)  # rename returns a new frame, so reassign
features.append(tmp_feature2)
pool.close()
return pd.concat(features, axis=1)
@timer
def feature_creation_V1(pool, store, func, func_args, name):
desired_type = name[:2]
assert desired_type in ["NN", "NC", "CN", "CC"]
new_func = partial(feature_creation_row_helper, func, func_args, desired_type)
mapped = pool.map(new_func, store.raw.as_matrix())
names = None
transformed = []
for row_names, transformed_row in mapped:
if names is None:
names = row_names
assert names == row_names
transformed.append(transformed_row)
new_names = ["{}_{}".format(name, n) for n in names]
result = pd.DataFrame(transformed, columns=new_names).fillna(0)
result[np.isinf(result)] = 0
return result
def feature_creation_row_helper(func, func_args, desired_type, row):
if len(func_args) > 0:
func = func(*func_args)
return feature_creation_row(func, desired_type, *row)
def feature_creation_row(func, desired_type, x, y, type_x, type_y):
assert isinstance(x, np.ndarray)
assert isinstance(y, np.ndarray)
assert type_x in ["Numerical", "Categorical", "Binary"]
assert type_y in ["Numerical", "Categorical", "Binary"]
left = asymmetric_feature_creation(func, desired_type, x, y, type_x, type_y)
right = asymmetric_feature_creation(func, desired_type, y, x, type_y, type_x)
relative = feature_difference(left, right)
new_left = preprend_name("A->B", left)
new_right = preprend_name("B->A", right)
features = (new_left, new_right, relative)
return combine_features(features)
def asymmetric_feature_creation(func, desired_type, x, y, type_x, type_y):
cat_x, cat_y = convert_to_categorical(x, type_x), convert_to_categorical(y, type_y)
num_xs, num_ys = convert_to_numerical(x, type_x), convert_to_numerical(y, type_y)
if desired_type == "NN":
nn_tmp = [func(num_x, num_y) for num_x in num_xs for num_y in num_ys]
features = feature_sum(nn_tmp) + feature_avg(nn_tmp)
elif desired_type == "CN":
cn_tmp = [func(cat_x, num_y) for num_y in num_ys]
features = feature_sum(cn_tmp) + feature_avg(cn_tmp)
elif desired_type == "NC":
nc_tmp = [func(num_x, cat_y) for num_x in num_xs]
features = feature_sum(nc_tmp) + feature_avg(nc_tmp)
elif desired_type == "CC":
features = func(cat_x, cat_y)
else:
raise Exception("Incorrect desired type: {}".format(desired_type))
return features
def create_V2_cache_transform(row):
a, b, a_type, b_type = row
assert a_type in ["Numerical", "Categorical", "Binary"]
assert b_type in ["Numerical", "Categorical", "Binary"]
num_x, cat_x, num_y, cat_y = a, a, b, b
if a_type == "Numerical":
cat_x = NumericalToCategorical(verify=False).fit_transform(num_x)
if a_type == "Categorical":
num_x = CategoricalToNumerical(verify=False).fit_transform(cat_x)
if b_type == "Numerical":
cat_y = NumericalToCategorical(verify=False).fit_transform(num_y)
if b_type == "Categorical":
num_y = CategoricalToNumerical(verify=False).fit_transform(cat_y)
return (num_x, cat_x, num_y, cat_y)
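# create_V2_cache_transform pre-computes both a numerical and a categorical
# view of each variable once per row, so the V2 feature functions below can
# simply pick the representation matching their two-letter type code
# ('N' = numerical, 'C' = categorical; first letter for x, second for y)
# instead of re-converting inside every feature.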
@timer
def create_V2_cache(df, pool):
assert isinstance(df, pd.DataFrame)
for col in ['A', 'B', 'A type', 'B type']:
assert col in df
V2_cache = pool.map(create_V2_cache_transform, df.as_matrix())
return tuple(V2_cache)
@timer
def feature_creation_V2(pool, V2_cache, func, func_args, name):
desired_type = name[:2]
assert desired_type in ["NN", "NC", "CN", "CC"]
new_func = partial(feature_creation_row_helper_V2, func, func_args, desired_type)
mapped = pool.map(new_func, V2_cache)
# mapped = map(new_func, V2_cache)
names = None
transformed = []
for row_names, transformed_row in mapped:
if names is None:
names = row_names
assert names == row_names
transformed.append(transformed_row)
new_names = ["{}_{}".format(name, n) for n in names]
result = pd.DataFrame(transformed, columns=new_names).fillna(0)
result[np.isinf(result)] = 0
return result
def feature_creation_row_helper_V2(func, func_args, desired_type, row):
if len(func_args) > 0:
func = func(*func_args)
row = map(lambda x: x.astype(np.float), row)
return feature_creation_row_V2(func, desired_type, *row)
def feature_creation_row_V2(func, desired_type, num_x, cat_x, num_y, cat_y):
assert isinstance(num_x, np.ndarray)
assert isinstance(cat_x, np.ndarray)
assert isinstance(num_y, np.ndarray)
assert isinstance(cat_y, np.ndarray)
left = asymmetric_feature_creation_V2(func, desired_type, num_x, cat_x, num_y, cat_y)
right = asymmetric_feature_creation_V2(func, desired_type, num_y, cat_y, num_x, cat_x)
relative = feature_difference(left, right)
new_left = preprend_name("A->B", left)
new_right = preprend_name("B->A", right)
features = (new_left, new_right, relative)
return combine_features(features)
def asymmetric_feature_creation_V2(func, desired_type, num_x, cat_x, num_y, cat_y):
if desired_type == "NN":
features = func(num_x, num_y)
elif desired_type == "CN":
features = func(cat_x, num_y)
elif desired_type == "NC":
features = func(num_x, cat_y)
elif desired_type == "CC":
features = func(cat_x, cat_y)
else:
raise Exception("Incorrect desired type: {}".format(desired_type))
return features
def metafeature_creation(df):
def or_(t1, t2):
return ((t1 + t2) > 0) + 0.0
def and_(t1, t2):
return ((t1 + t2) == 2) + 0.0
types = ["Binary", "Numerical", "Categorical"]
assert isinstance(df, pd.DataFrame)
a_type = np.array(df['A type'])
b_type = np.array(df['B type'])
metafeatures = []
columns = []
for t in types:
tmp = (a_type == t) + 0.0
columns.append("aIs" + t)
metafeatures.append(tmp)
for t in types:
tmp = (a_type != t) + 0.0
columns.append("aIsNot" + t)
metafeatures.append(tmp)
for t in types:
tmp = (b_type == t) + 0.0
columns.append("bIs" + t)
metafeatures.append(tmp)
for t in types:
tmp = (b_type != t) + 0.0
columns.append("bIsNot" + t)
metafeatures.append(tmp)
for t1 in types:
for t2 in types:
tmp = and_(a_type == t1, b_type == t2)
columns.append("abAre" + t1 + t2)
metafeatures.append(tmp)
if t1 <= t2:
tmp = or_(and_(a_type == t1, b_type == t2), and_(a_type == t2, b_type == t1))
columns.append("abAreAmong" + t1 + t2)
metafeatures.append(tmp)
six_options = or_(a_type == "Binary", b_type == "Binary") + 2 * or_(a_type == "Categorical", b_type == "Categorical") + 3 * and_(a_type == "Binary", b_type == "Binary") + 3 * and_(a_type == "Categorical", b_type == "Categorical")
columns.append("allTypes")
metafeatures.append(six_options)
return metafeatures, columns
def add_metafeatures(df, df_feat):
metafeatures, columns = metafeature_creation(df)
assert len(metafeatures) == len(columns)
for mf, col in zip(metafeatures, columns):
df_feat["metafeature_" + col] = mf
|
gpl-3.0
|
CopyChat/Plotting
|
Python/climate_change/swio_changes_rsds_rcp85_with_rcm.relative.py
|
1
|
11921
|
#!/usr/bin/env python
########################################
# Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pandas as pd
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 charactors
DIR='/Users/tang/climate/CMIP5/monthly/rsds/'
VARIABLE='rsds'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
AbsTemp=0
#AbsTemp=0
RefTemp=0.0000001
# RefTemp: initialisation only, could be any value
CRUmean=8.148 #1900-2100 land
TargetModel=[\
#'CanESM2',\
#'BCC-CSM1.1',\
#'CCSM4',\
#'CNRM-CM5',\
#'CSIRO-Mk3.6.0',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
#'INM-CM4',\
#'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MRI-CGCM3',\
#'NorESM1-M',\
#'MPI-ESM-LR',\
]
COLORtar=['red','dodgerblue','deeppink','orange',\
'orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'darkred','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
#================================================ CMIP5 models
# for rcp8.5
# ls -l | awk '{printf "999%s998,\\\n",$NF}' | sort -n
modelist2=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CMCC-CM',\
'CMCC-CMS',\
#'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
#'EC-EARTH',\
'FIO-ESM',\
'GFDL-CM3',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-R',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'IPSL-CM5B-LR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MRI-CGCM3',\
'NorESM1-M',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
print "==============================================="
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
#plt.ylabel('SWIO Surface Downwelling Solar Radiation Change (W/m2)',fontsize=16)
plt.ylabel('SWIO Surface Downwelling Radiation Changes (%)',fontsize=16)
plt.title("SWIO Surface Downwelling Radiation Changes simulated by GCMs and RegCM",fontsize=18)
#plt.title('Global Surface Downwelling Solar Radiation Changes simulated by CMIP5 models (W/m2)',fontsize=18)
# vertical range ylim yrange
plt.ylim(-6,6)
plt.xlim(1981,2100)
plt.grid()
plt.xticks(np.arange(1981, 2093+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
#=================================================== 3 windows
plt.axvspan(1996, 2005, alpha=0.2, color='teal')
plt.axvspan(2046, 2055, alpha=0.2, color='teal')
plt.axvspan(2090, 2099, alpha=0.2, color='teal')
#=================================================== 3 windows
plt.axvline(x=2005,linewidth=1, color='gray')
plt.axhline(y=0,linewidth=1, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for historical
########################## for historical
print "========== for rcp85 ==============="
EXPERIMENT='historical-rcp85'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
TIME='196101-209912'
filetag="swiomean"
YEAR=range(1961,2100)
Nmonth=1668
SumTemp=np.zeros(Nmonth/12)
K=0
for Model in modelist2:
#define the K-th model input file:
K=K+1 # for average
infile1=DIR+'rcp8.5'+'/'+Model+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+ENSEMBLE+'_'+TIME+'.'+filetag+'.nc'
#an example: tas_Amon_CanESM2_historical-rcp85_r1i1p1_200601-210012.globalmean.nc & \
#this file was copied locally for tests in this book
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
print 'the variable tas ===============: '
print TAS
# calculate the annual mean temp:
TEMP=range(0,Nmonth,12)
for j in range(0,Nmonth,12):
TEMP[j/12]=np.mean(TAS[j:j+12])-AbsTemp  # annual mean over the 12 months of each year
print " temp ======================== absolut"
print TEMP
#=================================================== apply 3 month running mean
#TEMP_temp=np.asarray(TEMP)
#TEMPS=pd.Series(TEMP_temp)
#TEMP=pd.rolling_mean(TEMP_temp,3)
#=================================================== apply 3 month running mean
# reference temp: mean of 1996-2005
RefTemp=np.mean(TEMP[len(TEMP)-94-10+1:len(TEMP)-94])
if Model=='HadGEM2-ES':
HadRefTemp=RefTemp
if Model=='GFDL-ESM2M':
GFDLRefTemp=RefTemp
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[(t-RefTemp)*100/(RefTemp) for t in TEMP]
print " temp ======================== relative to mean of 1986-2005"
print TEMP
##quit()
# for std
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
print np.shape(SumTemp)
SumTemp=SumTemp+TEMP
print SumTemp
print np.shape(SumTemp)
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
#quit()
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],\
linewidth=1.5)
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="darkred",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
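# (under a normal approximation across models, mean +/- 1.645*std spans the
# central 90%, i.e. roughly the 5th-95th percentile band)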
print "Model number for historical is :",K
print "models for historical:";print modelist2
plt.plot(YEAR,AveTemp,label='ensemble mean',color="black",linewidth=0.5)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2015,2.5,str(K)+' CMIP5 models',size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== put downscaling data:
# for Had
Had_dir1="/Users/tang/climate/Modeling/333/Had.G71E0001/output/pprcmdata/monthly/"
Had_dir2="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2044-2055/pprcmdata/monthly/"
Had_dir3="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2088-2100/pprcmdata/monthly/"
infile1=Had_dir1+'Had_hist.SRF.all.year.fldmean.1996-2005.nc'
infile2=Had_dir2+'Had_rcp85.SRF.all.year.fldmean.2046-2055.nc'
infile3=Had_dir3+'Had_rcp85.SRF.all.year.fldmean.2090-2099.nc'
YEAR1=range(1996,2006)
YEAR2=range(2046,2056)
YEAR3=range(2090,2100)
#open input files
infile01=IO.NetCDFFile(infile1,'r')
infile02=IO.NetCDFFile(infile2,'r')
infile03=IO.NetCDFFile(infile3,'r')
print infile01.variables.keys()
print infile02.variables.keys()
print infile03.variables.keys()
# read the variable tas
TAS1=infile01.variables[VARIABLE][:].copy()
TAS2=infile02.variables[VARIABLE][:].copy()
TAS3=infile03.variables[VARIABLE][:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
TEMP1=range(0,9)
TEMP2=range(0,9)
TEMP3=range(0,9)
# reference temp: mean of 1996-2005
RefTemp=np.mean(TAS1[1:10,0,0])-AbsTemp
print RefTemp
TEMP1=(TAS1[:,0,0]-RefTemp)*100/RefTemp
TEMP2=(TAS2[:,0,0]-RefTemp)*100/RefTemp
TEMP3=(TAS3[:,0,0]-RefTemp)*100/RefTemp
#print " temp ======================== absolut"
#print TEMP
plt.plot(YEAR1,TEMP1,color="blue",label="HadGEM2-ES + RegCM",linewidth=6,linestyle='-')
plt.plot(YEAR2,TEMP2,color="blue",linewidth=6,linestyle='-')
plt.plot(YEAR3,TEMP3,color="blue",linewidth=6,linestyle='-')
#===================================================
# for GFDL
GFDL_dir1="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output/pprcmdata/monthly/"
GFDL_dir2="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output.RCP85.2044-2055/pprcmdata/monthly/"
GFDL_dir3="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output.RCP85.2088-2100/pprcmdata/monthly/"
infile1=GFDL_dir1+'GFDL_hist.SRF.all.year.fldmean.1996-2005.nc'
infile2=GFDL_dir2+'GFDL_rcp85.SRF.all.year.fldmean.2046-2055.nc'
infile3=GFDL_dir3+'GFDL_rcp85.SRF.all.year.fldmean.2090-2099.nc'
YEAR1=range(1996,2006)
YEAR2=range(2046,2056)
YEAR3=range(2090,2100)
#open input files
infile01=IO.NetCDFFile(infile1,'r')
infile02=IO.NetCDFFile(infile2,'r')
infile03=IO.NetCDFFile(infile3,'r')
print infile01.variables.keys()
print infile02.variables.keys()
print infile03.variables.keys()
# read the variable tas
TAS1=infile01.variables[VARIABLE][:].copy()
TAS2=infile02.variables[VARIABLE][:].copy()
TAS3=infile03.variables[VARIABLE][:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
TEMP1=range(0,9)
TEMP2=range(0,9)
TEMP3=range(0,9)
# reference temp: mean of 1996-2005
RefTemp=np.mean(TAS1[1:10,0,0])
TEMP1=(TAS1[:,0,0]-RefTemp)*100/RefTemp
TEMP2=(TAS2[:,0,0]-RefTemp)*100/RefTemp
TEMP3=(TAS3[:,0,0]-RefTemp)*100/RefTemp
#print " temp ======================== absolut"
#print TEMP
plt.plot(YEAR1,TEMP1,color="darkred",label="GFDL-ESM2M + RegCM",linewidth=6,linestyle='-')
print len(YEAR2)
print len(TEMP2)
plt.plot(YEAR2,TEMP2,color="darkred",linewidth=6,linestyle='-')
plt.plot(YEAR3,TEMP3,color="darkred",linewidth=6,linestyle='-')
print "==============================================="
print TEMP1
print TEMP2
print TEMP3
print RefTemp
plt.legend(loc=2)
plt.show()
quit()
|
gpl-3.0
|
sarahgrogan/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
103
|
22297
|
"""
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
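# Worked example of the helper above, with hypothetical values: for
# scores_ == [3, 1, 5, 2] and support == [True, False, True, False], the kept
# scores sorted are [3, 5], which equal the top support.sum() == 2 entries of
# the fully sorted scores, so the assertion holds.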
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
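# Illustrative sketch, not part of the original test suite: how the
# Benjamini-Hochberg step-up rule referenced in test_select_fdr_regression
# picks its p-value cutoff. The helper name `_bh_cutoff_demo` and the toy
# p-values are assumptions made purely for illustration.
def _bh_cutoff_demo(alpha=0.1):
    # Toy p-values: a few genuinely small ones mixed with noise.
    pvals = np.array([0.001, 0.008, 0.039, 0.041, 0.27, 0.49, 0.61, 0.74])
    m = len(pvals)
    order = np.argsort(pvals)
    ranked = pvals[order]
    # BH step-up: find the largest i with p_(i) <= alpha * i / m and reject
    # the i smallest p-values; this keeps E[FP / (TP + FP)] <= alpha.
    below = ranked <= alpha * np.arange(1, m + 1) / m
    n_reject = int(below.nonzero()[0].max()) + 1 if below.any() else 0
    selected = np.zeros(m, dtype=bool)
    selected[order[:n_reject]] = True
    return selected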
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
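# Illustrative helper, not part of the original test suite: a quick way to
# see the tie described in test_tied_pvalues -- chi2 returns distinct scores
# but tied p-values for these three features. The helper name
# `_chi2_ties_demo` is an assumption made purely for illustration.
def _chi2_ties_demo():
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    scores, pvalues = chi2(X0, y)
    # The three scores differ while the p-values are tied, which is what the
    # test above relies on when checking which feature gets dropped.
    return scores, pvalues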
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
|
bsd-3-clause
|
jankoslavic/numpy
|
numpy/core/tests/test_multiarray.py
|
1
|
218667
|
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import os
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.core import *
from numpy.compat import asbytes, getexception, strchar, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # itemsizes that are not a power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = arange(10)
self.two = arange(20).reshape(4, 5)
self.three = arange(60, dtype=float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, dtype(int_))
assert_equal(self.three.dtype, dtype(float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
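        # A valid negative-stride view works; offsets/strides that would step
        # outside x's 10-element buffer are rejected below.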
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
            r.strides = strides * x.itemsize
return r
assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
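        # For every supported dtype character, x.fill(1) must give the same
        # result as the broadcasting assignment y[...] = 1.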
for t in "?bhilqpBHILQPfdgFDGO":
x = empty((3, 2, 1), t)
y = empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = empty((3, 2, 1), dtype=uint64)
y = empty((3, 2, 1), dtype=uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = dtype('i4')
assert_equal(d1, dtype(int32))
d2 = dtype('f8')
assert_equal(d2, dtype(float64))
def test_byteorders(self):
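        # dtypes differing only in byte order must not compare equal, either
        # as plain dtypes or inside a structured dtype.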
self.assertNotEqual(dtype('<i4'), dtype('>i4'))
self.assertNotEqual(dtype([('a', '<i4')]), dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = array(0), array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_constructor(self):
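        # A 0-d array built with x as its buffer shares x's memory, so a
        # write through y must be visible in x.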
x = ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = array(2)
self.assertRaises(ValueError, add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[newaxis].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ...].shape, (1,))
self.assertEqual(a[..., newaxis].shape, (1,))
self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1))
self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1))
self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1))
self.assertEqual(a[(newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i): x[i]
self.assertRaises(IndexError, subscript, a, (newaxis, 0))
self.assertRaises(IndexError, subscript, a, (newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, array, x())
def test_from_string(self) :
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = array([123, 123], dtype=int)
for type in types :
msg = 'String conversion for %s' % type
assert_equal(array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = zeros((3,3), dtype=c)
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = zeros((3,3), dtype='S5')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='U5')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='<i4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='>i4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='<M8[s]')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='>M8[s]')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
d = zeros((3,3), dtype='f4,f4')
assert_array_equal(zeros_like(d), d)
assert_equal(zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type, indicated by
        its __getitem__ raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a==b, [True, True])
assert_equal(a!=b, [False, False])
b[1].b = 'c'
assert_equal(a==b, [True, False])
assert_equal(a!=b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a==b, [False, False])
assert_equal(a!=b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a==b, [False, True])
assert_equal(a!=b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a==b, [[True, True, False], [False, False, True]])
assert_equal(b==a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a==b, [[True, False, False], [False, False, True]])
assert_equal(b==a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
new2 = dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
#check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = bool_(0)
b0 = bool_(False)
self.assertTrue(a0 is b0)
a1 = bool_(1)
b1 = bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(array([True])[0] is a1)
self.assertTrue(array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool);
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
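        # Lay out every `power`-bit pattern as a boolean array of `length`
        # elements and check np.count_nonzero against Python's own sum, also
        # after rewriting the underlying bytes through a uint8 view.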
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(array([1.2, 1.5]), [1, 2])
check_round(array(1.5), 2)
check_round(array([12.2, 15.5]), [10, 20], -1)
check_round(array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(array([4.5 + 1.5j]), [4 + 2j])
check_round(array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy();
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
dtype = '{0}{1}'.format(endianess, dt)
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=dtype)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "string sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "unicode sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "object sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy();
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1=np.array([21, 32, 14])
x2=np.array(['my', 'first', 'name'])
x3=np.array([3.1, 4.5, 6.2])
r=np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, array([14, 21, 32]))
assert_equal(r.word, array(['name', 'my', 'first']))
assert_equal(r.number, array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, array([32, 21, 14]))
assert_equal(r.word, array(['first', 'my', 'name']))
assert_equal(r.number, array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, array([21, 32, 14]))
assert_equal(r.word, array(['my', 'first', 'name']))
assert_equal(r.number, array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype= mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h'] :
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h'] :
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
dtype = '{0}{1}'.format(endianess, dt)
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=dtype)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = array([(i, i) for i in range(101)], dtype = dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h'] :
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm'] :
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using axis=None is known to fail at this point
        # assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
        # Check 1 element
        a = np.ones(1)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
            d = np.ones(10)
            d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
            d = np.arange(47) % 7
            tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
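        """Check that d is partitioned around every index in kth: the
        elements between the previous kth position and k compare strictly
        less than d[k], and everything from k onwards is >= d[k]."""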
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
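        # (here the result [2, 3, 0, 1] is not in the buffer's memory order,
        # so a copy has to be made, hence the owndata check below)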
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# General case of possible ravel that is not contiguous but
# works and includes a 1-sized axis with non matching stride
a = a.swapaxes(-1, -2) # swap back to C-order
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
a = a.T # swap all to Fortran order
assert_(np.may_share_memory(a.ravel(order='F'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
# Test negative strides:
a = np.arange(4)[::-1].reshape(2, 2)
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# Test keeporder with weirdly strided 1-sized dims (1-d first stride)
a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
strides = list(a.strides)
strides[0] = -12
strides[-1] = 0
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), a.ravel('C'))
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
#Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
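        # When an intermediate operand's reference count drops to 1 its
        # buffer can presumably be reused for the next operation in place
        # (see the incref_elide tests below); the checks here only verify
        # that the values still come out right.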
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the return original should not be changed to an inplace operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack,
        # to check that we are called directly from python, is flawed as the
        # object may still be above the stack pointer and we have no access
        # to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the return original should not be changed to an inplace operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
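        # Roughly: for an object `o` defining both __rmul__ and
        # __numpy_ufunc__, `np.array([1]) * o` should return the result of
        # o.__rmul__ rather than dispatch through the ufunc machinery.  Each
        # entry below maps a forward op name to (reflected/right-hand name,
        # corresponding ufunc or None, whether an in-place variant exists).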
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(ndarray):
pass
class OtherNdarraySubclassWithOverride(ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
transpose(carray),
array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return loads(obj, encoding='latin1')
else:
return loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1, 2, 3, 4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = array([1, 2, 3, 4], dtype=int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = array([1.0, 2.0, 3.0, 4.0], dtype=float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_tuple(self):
x = ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, array([[2.0]]))
x = ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, array([[[2.0]]]))
def test_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1, 0, 0], bool)
assert_array_equal(x[m], array([2]))
def test_mask2(self):
x = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], array([2, 5]))
assert_array_equal(x[m3], array([2]))
def test_assign_mask(self):
x = array([1, 2, 3, 4])
m = array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = array([0, 1], bool)
m2 = array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = array(["This", "is", "example"])
g2 = array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = array([sixu("This"), sixu("is"), sixu("example")])
g2 = array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
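    # In the cases above a NaN (including a complex value with a NaN
    # component) propagates, so argmax is expected to return the index of the
    # first NaN; NaN-free complex values compare lexicographically, real part
    # first, then imaginary part.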
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these restrictions could possibly be relaxed (numpy used to allow
        # even the previous cases)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = zeros(2, dtype=int)
out2 = zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = zeros(3, dtype=int)
out2 = zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r"%arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these restrictions could possibly be relaxed (numpy used to allow
        # even the previous cases)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = zeros(2, dtype=int)
out2 = ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = zeros(3, dtype=int)
out2 = ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
class TestNewaxis(TestCase):
def test_basic(self):
sk = array([0, -0.1, 0.1])
res = 250*sk[:, newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
    def _clip_type(self, type_group, array_max,
                   clip_min, clip_max, inplace=False,
                   expected_min=None, expected_max=None):
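        """Clip random data from the given sctypes group against
        [clip_min, clip_max] (in place if requested) and check that the
        result stays within [expected_min, expected_max]."""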
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
                if x.dtype.byteorder == '|':
                    byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type('float', 1024, 0, 0, inplace=inplace)
self._clip_type('int', 1024, -120, 100.5, inplace=inplace)
self._clip_type('int', 1024, 0, 0, inplace=inplace)
x = self._clip_type('uint', 1024, -120, 100, expected_min=0,
inplace=inplace)
x = self._clip_type('uint', 1024, 0, 0, inplace=inplace)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
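        # np.lexsort sorts by the last key first, so `a` is the primary key
        # here and `b` only breaks ties between equal values of `a`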
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [nan, inf, -inf, nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
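        # after reading 2 bytes and then one float64 (8 bytes) the file
        # position should sit at offset 10, regardless of the buffer size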
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[nan, nan, nan, nan, nan, nan, nan],
sep=' ')
def test_inf(self):
self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
[inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
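        # self.b0 is writeable but not contiguous, so its flat.__array__()
        # presumably has to return an UPDATEIFCOPY buffer with base b0 that
        # writes back on deallocation, which is what the asserts on f below
        # check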
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x.flat[9:], 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        y = x  # hold a second reference so that refcheck makes resize fail
self.assertRaises(ValueError, x.resize, (5, 1))
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
mat = np.eye(3)
# stats for integer types
# fixme:
# this needs a definition, as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
#for c in icodes:
#tgt = np.dtype(c).type
#res = f(mat, axis=1, dtype=c).dtype.type
#assert_(res is tgt)
## scalar case
#res = f(mat, axis=None, dtype=c).dtype.type
#assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in fcodes:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
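# Illustrative sketch (not part of the original suite): the ddof checks above rest
# on the identity var(x, ddof=d) * (n - d) == sum((x - mean)**2) for every d, which
# is what makes the rescaled results comparable.
def _sketch_ddof_identity():
    import numpy as np
    x = np.random.random(7)
    ss = ((x - x.mean()) ** 2).sum()      # sum of squared deviations
    for d in range(3):
        assert np.allclose(x.var(ddof=d) * (x.size - d), ss)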
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
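# Illustrative sketch (not part of the original suite): the target above is the
# textbook identity var(x) = E[x * conj(x)] - |E[x]|**2, checked here directly
# against ndarray.var for a small complex sample.
def _sketch_var_identity():
    import numpy as np
    x = np.array([1 + 2j, 3 - 1j, 0.5 + 0j])
    rhs = np.mean(x * x.conj()) - abs(x.mean()) ** 2
    assert np.allclose(x.var(), rhs)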
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = array([[1, 2], [3, 4]], order='C')
b = array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
class TestDot(TestCase):
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = array([[1, 2], [3, 4]], order='C')
b = array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
if sys.platform != 'darwin':
return
def aligned_array(shape, align, dtype, order='C'):
d = dtype()
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
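# search for the smallest offset that makes the data pointer a multiple
# of `align` bytes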
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
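# Illustrative sketch (not part of the original suite): matmul multiplies over the
# last two axes and broadcasts any leading axes; a 1-d operand is promoted to a
# matrix and the inserted axis is dropped from the result.
def _sketch_matmul_shapes():
    import numpy as np
    assert np.matmul(np.ones((2, 1, 4)), np.ones((4, 3))).shape == (2, 1, 3)
    assert np.matmul(np.ones((5, 2, 4)), np.ones((1, 4, 3))).shape == (5, 2, 3)
    assert np.matmul(np.ones(4), np.ones((4, 3))).shape == (3,)
    assert np.shape(np.matmul(np.ones(4), np.ones(4))) == ()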
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(mat1, vec)
assert_equal(res, tgt1)
res = self.matmul(mat2, vec)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt22 = np.array([[1, 0], [2, 1]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt22 = m2
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
from operator import matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
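# Illustrative sketch (not part of the original suite): without @=, the supported
# spellings are rebinding the name or writing into a preallocated output array.
def _sketch_matmul_without_imatmul():
    import numpy as np
    a, b = np.eye(3), np.eye(3)
    a = np.matmul(a, b)                   # rebind, i.e. "a = a @ b" on Python 3.5+
    out = np.empty_like(a)
    np.matmul(a, b, out=out)              # or reuse an explicit output buffer
    return out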
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = zeros(shape = (1, 80), dtype = float64)
p = inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*ones((3,), dtype=int)
self.y = 3*ones((3,), dtype=int)
self.x2 = 2*ones((2, 3), dtype=int)
self.y2 = 3*ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
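# Illustrative sketch (not part of the original suite): these modes roughly match
# np.pad -- 'zero'/'one'/'constant' pad with a fixed value, 'circular' behaves like
# mode='wrap', and 'mirror' like mode='symmetric' (the edge value is repeated).
def _sketch_padding_modes():
    import numpy as np
    x = np.array([1., 2., 3.])
    assert (np.pad(x, 2, mode='constant') == [0, 0, 1, 2, 3, 0, 0]).all()
    assert (np.pad(x, 2, mode='wrap') == [2, 3, 1, 2, 3, 1, 2]).all()
    assert (np.pad(x, 2, mode='symmetric') == [2, 1, 1, 2, 3, 3, 2]).all()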
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue(all(i.dtype == dt for i in l))
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and* the item size
# should match the alignment when in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
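# Illustrative sketch (not part of the original suite): the same trailing padding
# shows up in numpy itself when align=True -- an int8 followed by an int32 occupies
# 8 bytes rather than 5 on platforms where int32 is 4-byte aligned (the usual case).
def _sketch_aligned_itemsize():
    import numpy as np
    packed = np.dtype([('a', 'i1'), ('b', 'i4')])
    aligned = np.dtype([('a', 'i1'), ('b', 'i4')], align=True)
    assert packed.itemsize == 5
    assert aligned.itemsize == 8
    assert aligned.fields['b'][1] == 4    # 'b' starts at an aligned offset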
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
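# Illustrative sketch (not part of the original suite): the owndata checks above
# reflect that np.asarray wraps the exporter's buffer (no copy) while np.array
# copies it, so only the former sees subsequent writes.
def _sketch_buffer_sharing():
    import numpy as np
    a = np.arange(5)
    shared = np.asarray(memoryview(a))    # shares memory with `a`
    copied = np.array(memoryview(a))      # independent copy
    shared[0] = 42
    assert a[0] == 42
    assert copied[0] == 0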
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([dtype(b).itemsize for a, b in dt])
if dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr' : '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[ 0., 1., 2., 19.,],
[ 104., 5., 6., 7.,],
[ 8., 9., 40., 11.,]])
b = arange(6).astype(float)
index = (array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. ])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class PriorityNdarray():
__array_priority__ = 1000
def __init__(self, array):
self.array = array
def __lt__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array < array)
def __gt__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array > array)
def __le__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array <= array)
def __ge__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array >= array)
def __eq__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array == array)
def __ne__(self, array):
if isinstance(array, PriorityNdarray):
array = array.array
return PriorityNdarray(self.array != array)
class TestArrayPriorityComparisons(TestCase):
# Rich-comparison dispatch through a high-__array_priority__ wrapper class;
# named distinctly from the TestArrayPriority class defined further below.
def test_lt(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l < r
res2 = l < rp
res3 = lp < r
res4 = lp < rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_gt(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l > r
res2 = l > rp
res3 = lp > r
res4 = lp > rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_le(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l <= r
res2 = l <= rp
res3 = lp <= r
res4 = lp <= rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_ge(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l >= r
res2 = l >= rp
res3 = lp >= r
res4 = lp >= rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_eq(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l == r
res2 = l == rp
res3 = lp == r
res4 = lp == rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
def test_ne(self):
l = np.asarray([0., -1., 1.], dtype=dtype)
r = np.asarray([0., 1., -1.], dtype=dtype)
lp = PriorityNdarray(l)
rp = PriorityNdarray(r)
res1 = l != r
res2 = l != rp
res3 = lp != r
res4 = lp != rp
assert_array_equal(res1, res2.array)
assert_array_equal(res1, res3.array)
assert_array_equal(res1, res4.array)
assert_(isinstance(res1, np.ndarray))
assert_(isinstance(res2, PriorityNdarray))
assert_(isinstance(res3, PriorityNdarray))
assert_(isinstance(res4, PriorityNdarray))
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
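# Illustrative sketch (not part of the original suite): the assertions above mean
# the scalar branch keeps the array dtype when its value fits (nan/inf in float32)
# and only upcasts when it cannot be represented (1e150 needs float64).
def _sketch_where_promotion():
    import numpy as np
    d = np.array([1., 2.], dtype=np.float32)
    assert np.where(True, d, float('nan')).dtype == np.float32
    assert np.where(True, d, 1e150).dtype == np.float64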
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = dtype('p').itemsize
# builds a dtype where '-' in the spec marks a missing pointer-sized ('p') field
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): #subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(dtype('O,p,O,p'), dtype('O,p,O,p,O,p'))
_view_is_safe(dtype('O,O'), dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('O,O'))
assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('O,p'))
assert_raises(TypeError, _view_is_safe, dtype('O,O,p'), dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(dtype('p,p'), dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - dtype(otype).itemsize+1):
d2 = dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
#test dtype with overlapping fields
overlapped = dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
lol/BCI-BO-old
|
BCI_Framework/Filter.py
|
1
|
2119
|
from scipy.signal import butter, lfilter
import numpy as np
#import matplotlib.pyplot as plt
from scipy.signal import freqz
class Filter:
"""This class performs different kind of filters on the data"""
def __init__(self):
pass
@staticmethod
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
@staticmethod
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
        print(lowcut, highcut)
b, a = Filter.butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
        print(lowcut, highcut)
return y
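# Illustrative usage sketch (not part of the original module, and not called
# anywhere): band-pass filtering a synthetic signal. The sampling rate, band
# edges and signal content below are assumed values chosen only for
# demonstration.
def _example_bandpass():
    fs = 250.0
    t = np.arange(0.0, 2.0, 1.0 / fs)
    # Synthetic signal: a 10 Hz sine plus white noise.
    signal = np.sin(2 * np.pi * 10.0 * t) + 0.5 * np.random.randn(t.size)
    # Keep only the 8-30 Hz band.
    return Filter.butter_bandpass_filter(signal, 8.0, 30.0, fs, order=5)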
if __name__ == "__main__":
    # matplotlib is only needed for this demo; the module-level import above
    # is commented out, so import it here to keep the script runnable.
    import matplotlib.pyplot as plt
    # Sample rate and desired cutoff frequencies (in Hz).
fs = 5000.0
lowcut = 500.0
highcut = 1250.0
# Plot the frequency response for a few different orders.
# plt.figure(1)
# plt.clf()
for order in [3, 6, 9]:
b, a = Filter.butter_bandpass(lowcut, highcut, fs, order=order)
w, h = freqz(b, a, worN=2000)
plt.plot((fs * 0.5 / np.pi) * w, abs(h), label="order = %d" % order)
plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)],
'--', label='sqrt(0.5)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.grid(True)
plt.legend(loc='best')
# Filter a noisy signal.
T = 0.05
    nsamples = int(T * fs)  # np.linspace expects an integer sample count
t = np.linspace(0, T, nsamples, endpoint=False)
a = 0.02
f0 = 600.0
x = 0.1 * np.sin(2 * np.pi * 1.2 * np.sqrt(t))
x += 0.01 * np.cos(2 * np.pi * 312 * t + 0.1)
x += a * np.cos(2 * np.pi * f0 * t + .11)
x += 0.03 * np.cos(2 * np.pi * 2000 * t)
plt.figure(2)
plt.clf()
plt.plot(t, x, label='Noisy signal')
y = Filter.butter_bandpass_filter(x, lowcut, highcut, fs, order=6)
plt.plot(t, y, label='Filtered signal (%g Hz)' % f0)
plt.xlabel('time (seconds)')
plt.hlines([-a, a], 0, T, linestyles='--')
plt.grid(True)
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
|
gpl-3.0
|
Akshay0724/scikit-learn
|
sklearn/datasets/svmlight_format.py
|
41
|
16768
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this loader with
    joblib.Memory.cache to store a memmapped backup of the CSR results of
    the first call and benefit from the near instantaneous loading of
    memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id : array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.int64)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features : int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
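# Illustrative sketch (not part of the original module, and not called
# anywhere): loading a train and a test split together so that both matrices
# share the same number of columns, as described in the Notes section of
# load_svmlight_files above. The file names are hypothetical.
def _example_load_train_test():
    X_train, y_train, X_test, y_test = load_svmlight_files(
        ("train.svmlight", "test.svmlight"))
    # Both splits are guaranteed to have the same feature count.
    assert X_train.shape[1] == X_test.shape[1]
    return X_train, y_train, X_test, y_test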
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
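# Illustrative sketch (not part of the original module, and not called
# anywhere): dumping a small dense dataset to svmlight format and loading it
# back. The output path is hypothetical and would normally point somewhere
# writable.
def _example_dump_and_reload():
    X = np.array([[0., 1.], [2., 0.], [3., 4.]])
    y = np.array([0, 1, 1])
    dump_svmlight_file(X, y, "/tmp/example.svmlight", zero_based=True)
    X2, y2 = load_svmlight_file("/tmp/example.svmlight", n_features=2,
                                zero_based=True)
    # X2 is a CSR matrix with the same non-zero entries as X.
    return X2.toarray(), y2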
|
bsd-3-clause
|
0asa/scikit-learn
|
sklearn/feature_extraction/dict_vectorizer.py
|
20
|
11431
|
# Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional
        Whether feature_names_ and vocabulary_ should be sorted when fitting.
        True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
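# Illustrative sketch (not part of the original module, and not called
# anywhere): restricting a fitted DictVectorizer to a subset of features
# using a boolean support mask, as could be produced by a feature selector.
def _example_restrict():
    v = DictVectorizer(sparse=False)
    D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    v.fit_transform(D)
    # Keep only the first and last feature of the fitted vocabulary.
    support = np.array([True, False, True])
    v.restrict(support)
    return v.get_feature_names()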
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that leaf subclusters have a radius no larger than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
mit
|
ihincks/python-qinfer
|
src/qinfer/tomography/plotting_tools.py
|
3
|
9802
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# plotting_tools.py: Functions for plotting tomographic data and estimates.
##
# © 2015 Chris Ferrie ([email protected]) and
# Christopher E. Granade ([email protected]),
# except where otherwise noted.
# Based on work with Joshua Combes ([email protected]).
#
# This file is a part of the Qinfer project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
# TODO: unit tests!
## FEATURES ##################################################################
from __future__ import absolute_import
from __future__ import division
## IMPORTS ###################################################################
from builtins import map
import numpy as np
try:
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
except ImportError:
import warnings
warnings.warn("Could not import matplotlib.")
plt = None
Ellipse = None
# Since the rest of QInfer does not require QuTiP,
# we need to import it in a way that we don't propagate exceptions if QuTiP
# is missing or is too early a version.
from qinfer.utils import get_qutip_module
qt = get_qutip_module('3.2')
## EXPORTS ###################################################################
__all__ = [
'plot_rebit_modelparams',
'plot_decorate_rebits',
'plot_cov_ellipse',
'plot_rebit_prior',
'plot_rebit_posterior'
]
## CONSTANTS #################################################################
REBIT_AXES = [1, 2]
## FUNCTIONS #################################################################
def plot_rebit_modelparams(modelparams, rebit_axes=REBIT_AXES, **kwargs):
"""
Given model parameters representing rebits, plots the
rebit states as a scatter plot. Additional keyword arguments
are passed to :ref:`plt.scatter`.
:param np.ndarray modelparams: Model parameters representing
rebits.
:param list rebit_axes: List containing indices for the :math:`x`
and :math:`z` axes.
"""
mps = modelparams[:, rebit_axes] * np.sqrt(2)
plt.scatter(mps[:, 0], mps[:, 1], **kwargs)
def plot_decorate_rebits(basis=None, rebit_axes=REBIT_AXES):
"""
Decorates a figure with the boundary of rebit state space
and basis labels drawn from a :ref:`~qinfer.tomography.TomographyBasis`.
:param qinfer.tomography.TomographyBasis basis: Basis to use in
labeling axes.
:param list rebit_axes: List containing indices for the :math:`x`
and :math:`z` axes.
"""
ax = plt.gca()
if basis is not None:
labels = list(map(r'$\langle\!\langle {} | \rho \rangle\!\rangle$'.format,
# Pick out the x and z by default.
[basis.labels[rebit_axes[0]], basis.labels[rebit_axes[1]]]
))
plt.xlabel(labels[0])
plt.ylabel(labels[1])
ax.add_artist(plt.Circle([0, 0], 1, color='k', fill=False))
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
ax.set_aspect('equal')
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
# Copied from https://github.com/joferkington/oost_paper_code in
# accordance with its license agreement.
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
:param cov: The 2x2 covariance matrix to base the ellipse on.
:param pos: The location of the center of the ellipse. Expects a 2-element
sequence of ``[x0, y0]``.
:param nstd: The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
:param ax: The axis that the ellipse will be plotted on. Defaults to the
current axis.
:return: A matplotlib ellipse artist.
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
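# Illustrative sketch (not part of the original module, and not called
# anywhere): drawing a 2-sigma covariance ellipse over a cloud of correlated
# samples. The covariance values below are assumed purely for demonstration.
def _example_cov_ellipse():
    if plt is None:
        return
    cov = np.array([[1.0, 0.6], [0.6, 0.5]])
    samples = np.random.multivariate_normal([0, 0], cov, size=500)
    plt.scatter(samples[:, 0], samples[:, 1], s=5)
    plot_cov_ellipse(cov, pos=[0, 0], nstd=2, fill=False, edgecolor='r')
    plt.show()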
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES,
n_samples=2000, true_state=None, true_size=250,
force_mean=None,
legend=True,
mean_color_index=2
):
"""
Plots rebit states drawn from a given prior.
:param qinfer.tomography.DensityOperatorDistribution prior: Distribution over
rebit states to plot.
:param list rebit_axes: List containing indices for the :math:`x`
and :math:`z` axes.
:param int n_samples: Number of samples to draw from the
prior.
:param np.ndarray true_state: State to be plotted as a "true" state for
comparison.
"""
pallette = plt.rcParams['axes.color_cycle']
plot_rebit_modelparams(prior.sample(n_samples),
c=pallette[0],
label='Prior',
rebit_axes=rebit_axes
)
if true_state is not None:
plot_rebit_modelparams(true_state,
c=pallette[1],
label='True', marker='*', s=true_size,
rebit_axes=rebit_axes
)
if hasattr(prior, '_mean') or force_mean is not None:
mean = force_mean if force_mean is not None else prior._mean
plot_rebit_modelparams(
prior._basis.state_to_modelparams(mean)[None, :],
edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3,
label='Mean',
rebit_axes=rebit_axes
)
plot_decorate_rebits(prior.basis,
rebit_axes=rebit_axes
)
if legend:
plt.legend(loc='lower left', ncol=3, scatterpoints=1)
def plot_rebit_posterior(updater, prior=None, true_state=None, n_std=3, rebit_axes=REBIT_AXES, true_size=250,
legend=True,
level=0.95,
region_est_method='cov'
):
"""
Plots posterior distributions over rebits, including covariance ellipsoids
:param qinfer.smc.SMCUpdater updater: Posterior distribution over rebits.
:param qinfer.tomography.DensityOperatorDistribution: Prior distribution
over rebit states.
:param np.ndarray true_state: Model parameters for "true" state to plot
as comparison.
:param float n_std: Number of standard deviations out from the mean
at which to draw the covariance ellipse. Only used if
region_est_method is ``'cov'``.
:param float level: Credibility level to use for computing
region estimators from convex hulls.
:param list rebit_axes: List containing indices for the :math:`x`
and :math:`z` axes.
:param str region_est_method: Method to use to draw region estimation.
Must be one of None, ``'cov'`` or ``'hull'``.
"""
pallette = plt.rcParams['axes.color_cycle']
plot_rebit_modelparams(updater.particle_locations,
c=pallette[0],
label='Posterior',
s=12 * np.sqrt(updater.particle_weights * len(updater.particle_weights)),
rebit_axes=rebit_axes,
zorder=-10
)
plot_rebit_modelparams(true_state,
c=pallette[1],
label='True', marker='*', s=true_size,
rebit_axes=rebit_axes
)
if prior is not None:
plot_rebit_modelparams(
prior._basis.state_to_modelparams(prior._mean)[None, :],
edgecolors=pallette[3], s=250, facecolors='none', linewidth=3,
label='Prior Mean',
rebit_axes=rebit_axes
)
plot_rebit_modelparams(
updater.est_mean()[None, :],
edgecolors=pallette[2], s=250, facecolors='none', linewidth=3,
label='Posterior Mean',
rebit_axes=rebit_axes
)
if region_est_method == 'cov':
# Multiplying by sqrt{2} to rescale to Bloch ball.
cov = 2 * updater.est_covariance_mtx()
# Use fancy indexing to cut out all but the desired submatrix.
cov = cov[rebit_axes, :][:, rebit_axes]
plot_cov_ellipse(
cov, updater.est_mean()[rebit_axes] * np.sqrt(2),
nstd=n_std,
edgecolor='k', fill=True, lw=2,
facecolor=pallette[0],
alpha=0.4,
zorder=-9,
label='Posterior Cov Ellipse ($Z = {}$)'.format(n_std)
)
elif region_est_method == 'hull':
# Find the convex hull from the updater, projected
# on the rebit axes.
faces, vertices = updater.region_est_hull(level, modelparam_slice=rebit_axes)
polygon = Polygon(vertices * np.sqrt(2),
facecolor=pallette[0], alpha=0.4, zorder=-9,
label=r'Credible Region ($\alpha = {}$)'.format(level),
edgecolor='k', lw=2, fill=True
)
# TODO: consolidate add_patch code with that above.
plt.gca().add_patch(polygon)
plot_decorate_rebits(updater.model.base_model._basis,
rebit_axes=rebit_axes
)
if legend:
plt.legend(loc='lower left', ncol=4, scatterpoints=1)
|
agpl-3.0
|
cloud-fan/spark
|
python/pyspark/sql/pandas/types.py
|
20
|
13357
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Type-specific codes between pandas and PyArrow. Also contains some utils to correct
pandas instances during the type conversion.
"""
from pyspark.sql.types import BooleanType, ByteType, ShortType, IntegerType, LongType, \
FloatType, DoubleType, DecimalType, StringType, BinaryType, DateType, TimestampType, \
ArrayType, MapType, StructType, StructField, NullType
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
from distutils.version import LooseVersion
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == MapType:
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise TypeError("MapType is only supported with pyarrow 2.0.0 and above")
if type(dt.keyType) in [StructType, TimestampType] or \
type(dt.valueType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.map_(to_arrow_type(dt.keyType), to_arrow_type(dt.valueType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
elif type(dt) == NullType:
arrow_type = pa.null()
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
from distutils.version import LooseVersion
import pyarrow as pa
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_map(at):
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise TypeError("MapType is only supported with pyarrow 2.0.0 and above")
if types.is_timestamp(at.key_type) or types.is_timestamp(at.item_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = MapType(from_arrow_type(at.key_type), from_arrow_type(at.item_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
elif types.is_dictionary(at):
spark_type = from_arrow_type(at.value_type)
elif types.is_null(at):
spark_type = NullType()
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
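# Illustrative sketch (not part of the original module, and not called
# anywhere): round-tripping a simple Spark schema through Arrow and back.
# Requires pyarrow to be installed; the field names are chosen only for
# demonstration.
def _example_schema_round_trip():
    spark_schema = StructType([
        StructField("id", LongType(), nullable=False),
        StructField("name", StringType(), nullable=True),
    ])
    arrow_schema = to_arrow_schema(spark_schema)
    # The conversion should be lossless for these primitive types.
    assert from_arrow_schema(arrow_schema) == spark_schema
    return arrow_schema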
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
Parameters
----------
s : pandas.Series
timezone : str
the timezone to convert. if None then use local timezone
Returns
-------
pandas.Series
`pandas.Series` that have been converted to tz-naive
"""
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
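# Illustrative sketch (not part of the original module, and not called
# anywhere): stripping the timezone from a tz-aware pandas Series. The
# timestamps and target timezone below are assumed purely for demonstration.
def _example_localize_timestamps():
    import pandas as pd
    s = pd.Series(
        pd.to_datetime(["2018-01-01 00:00", "2018-01-01 01:00"]).tz_localize("UTC"))
    # Values are converted to US/Eastern wall-clock time and made tz-naive.
    return _check_series_localize_timestamps(s, "US/Eastern")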
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
Parameters
----------
s : pandas.Series
timezone : str
the timezone to convert. if None then use local timezone
Returns
-------
pandas.Series
`pandas.Series` where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
# When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
# daylight saving time (dst).
# E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
# 2015-11-01 1:00 from dst-time to standard time, and therefore, when tz_localize
# a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
# Here are some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
Parameters
----------
s : pandas.Series
from_timezone : str
the timezone to convert from. if None then use local timezone
to_timezone : str
the timezone to convert to. if None then use local timezone
Returns
-------
pandas.Series
`pandas.Series` where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
Parameters
----------
s : pandas.Series
timezone : str
the timezone to convert to. if None then use local timezone
Returns
-------
pandas.Series
`pandas.Series` where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
Parameters
----------
s : pandas.Series
timezone : str
the timezone to convert from. if None then use local timezone
Returns
-------
pandas.Series
`pandas.Series` where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _convert_map_items_to_dict(s):
"""
Convert a series with items as list of (key, value), as made from an Arrow column of map type,
to dict for compatibility with non-arrow MapType columns.
:param s: pandas.Series of lists of (key, value) pairs
:return: pandas.Series of dictionaries
"""
return s.apply(lambda m: None if m is None else {k: v for k, v in m})
def _convert_dict_to_map_items(s):
"""
Convert a series of dictionaries to list of (key, value) pairs to match expected data
for Arrow column of map type.
:param s: pandas.Series of dictionaries
:return: pandas.Series of lists of (key, value) pairs
"""
return s.apply(lambda d: list(d.items()) if d is not None else None)
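# Illustrative sketch (not part of the original module, and not called
# anywhere): round-tripping a pandas Series between the dict representation
# and the list-of-pairs representation expected for Arrow map columns.
def _example_map_round_trip():
    import pandas as pd
    dicts = pd.Series([{"a": 1, "b": 2}, None])
    items = _convert_dict_to_map_items(dicts)   # [("a", 1), ("b", 2)], None
    back = _convert_map_items_to_dict(items)    # {"a": 1, "b": 2}, None
    return back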
|
apache-2.0
|
RTS2/rts2
|
scripts/u_point/u_point/sextractor_3.py
|
3
|
5940
|
# Sextractor-Python wrapper.
#
# You will need: scipy matplotlib sextractor
# This should work on Debian/ubuntu:
# sudo apt-get install python-matplotlib python-scipy python-pyfits sextractor
#
# If you would like to see sextractor results, get DS9 and pyds9:
#
# http://hea-www.harvard.edu/saord/ds9/
#
# Please be aware that current sextractor Ubuntu packages do not work
# properly. The best workaround is to install the package, and then overwrite
# the sextractor binary with one compiled from sources (so you will have
# access to the sextractor configuration files, which the program assumes).
#
# (C) 2010-2012 Petr Kubanek, Institute of Physics <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import subprocess
import os
import tempfile
import traceback
class Sextractor:
"""Class for a catalogue (SExtractor result)"""
def __init__(self, fields=['NUMBER', 'FLUXERR_ISO', 'FLUX_AUTO', 'X_IMAGE', 'Y_IMAGE', 'MAG_BEST', 'FLAGS', 'CLASS_STAR', 'FWHM_IMAGE', 'A_IMAGE', 'B_IMAGE','EXT_NUMBER'], sexpath='sextractor', sexconfig='/usr/share/sextractor/default.sex', starnnw='/usr/share/sextractor/default.nnw', threshold=2.7, deblendmin = 0.03, saturlevel=65535, verbose=False):
self.sexpath = sexpath
self.sexconfig = sexconfig
self.starnnw = starnnw
self.fields = fields
self.objects = []
self.threshold = threshold
self.deblendmin = deblendmin
self.saturlevel = saturlevel
self.verbose = verbose
def get_field(self,fieldname):
return self.fields.index(fieldname)
def runSExtractor(self,filename):
pf,pfn = tempfile.mkstemp()
ofd,output = tempfile.mkstemp()
pfi = os.fdopen(pf,'w')
for f in self.fields:
pfi.write(f + '\n')
pfi.flush()
cmd = [self.sexpath, filename, '-c', self.sexconfig, '-PARAMETERS_NAME', pfn, '-DETECT_THRESH', str(self.threshold), '-DEBLEND_MINCONT', str(self.deblendmin), '-SATUR_LEVEL', str(self.saturlevel), '-FILTER', 'N', '-STARNNW_NAME', self.starnnw, '-CATALOG_NAME', output]
if not(self.verbose):
cmd.append('-VERBOSE_TYPE')
cmd.append('QUIET')
stdo=stde=None
try:
proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdo,stde=proc.communicate()
except OSError as err:
            print ('cannot run command: "', ' '.join(cmd), '", error ',err)
raise err
# parse output
self.objects = []
of = os.fdopen(ofd,'r')
while (True):
x=of.readline()
if self.verbose:
pass#print (x)
if x == '':
break
if x[0] == '#':
continue
self.objects.append(list(map(float,x.split())))
# unlink tmp files
pfi.close()
of.close()
os.unlink(pfn)
os.unlink(output)
return stdo,stde
    def sortObjects(self,col):
        """Sort objects by the given column."""
        from operator import itemgetter
        self.objects = sorted(self.objects, key=itemgetter(col))
    def reverseObjects(self,col):
        """Reverse sort objects by the given column."""
        from operator import itemgetter
        self.objects = sorted(self.objects, key=itemgetter(col), reverse=True)
def filter_galaxies(self,limit=0.2):
"""Filter possible galaxies"""
try:
i_class = self.get_field('CLASS_STAR')
ret = []
for x in self.objects:
if x[i_class] > limit:
ret.append(x)
return ret
except ValueError as ve:
print ('result does not contain CLASS_STAR')
traceback.print_exc()
def get_FWHM_stars(self,starsn=None,filterGalaxies=True,segments=None):
"""Returns candidate stars for FWHM calculations. """
if len(self.objects) == 0:
raise Exception('Cannot find FWHM on empty source list')
obj = None
if filterGalaxies:
obj = self.filter_galaxies()
if len(obj) == 0:
raise Exception('Cannot find FWHM - all detected sources were filtered out as galaxies')
else:
obj = self.objects
try:
# sort by magnitude
i_mag_best = self.get_field('MAG_BEST')
            obj.sort(key=lambda x: x[i_mag_best])
fwhmlist = []
a = 0
b = 0
i_flags = self.get_field('FLAGS')
i_class = self.get_field('CLASS_STAR')
i_seg = self.get_field('EXT_NUMBER')
for x in obj:
if segments and x[i_seg] not in segments:
continue
if x[i_flags] == 0 and (filterGalaxies == False or x[i_class] != 0):
fwhmlist.append(x)
if starsn and len(fwhmlist) >= starsn:
break
else:
if self.verbose:
print ('rejected - FLAGS:', x[i_flags], ', CLASS_STAR:', x[i_class], 'line ', x)
return fwhmlist
except ValueError as ve:
traceback.print_exc()
return []
def calculate_FWHM(self,starsn=None,filterGalaxies=True,segments=None):
obj = self.get_FWHM_stars(starsn,filterGalaxies,segments)
try:
i_fwhm = self.get_field('FWHM_IMAGE')
import numpy
            fwhms = [x[i_fwhm] for x in obj]
            return numpy.median(fwhms), numpy.std(fwhms), len(fwhms)
# return numpy.average(obj), len(obj)
except ValueError as ve:
traceback.print_exc()
raise Exception('cannot find FWHM_IMAGE value')
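# Hypothetical usage sketch: the FITS file name below is a placeholder, and the
# sextractor binary plus its default configuration are assumed to be installed
# as described in the header comment above.
if __name__ == '__main__':
    sex = Sextractor(threshold=2.7, verbose=False)
    sex.runSExtractor('example_image.fits')
    sex.sortObjects(sex.get_field('MAG_BEST'))
    median_fwhm, std_fwhm, nstars = sex.calculate_FWHM(starsn=20)
    print('median FWHM %.2f +- %.2f px from %d stars' % (median_fwhm, std_fwhm, nstars))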
|
lgpl-3.0
|
stefanbo92/maleChildren
|
DeepLearning/trainDataNamesSimple.py
|
1
|
4439
|
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import datetime
print ("Packages loaded")
# Load them!
cwd = os.getcwd()
loadpath = cwd + "/custom_data_signs.npz"
l = np.load(loadpath)
# See what's in here
print (l.files)
# Parse data
trainimg = l['trainimg']
trainlabel = l['trainlabel']
testimg = l['testimg']
testlabel = l['testlabel']
imgsize = l['imgsize']
use_gray = l['use_gray']
ntrain = trainimg.shape[0]
nclass = trainlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print ("%d train images loaded" % (ntrain))
print ("%d test images loaded" % (ntest))
print ("%d dimensional input" % (dim))
print ("Image size is %s" % (imgsize))
print ("%d classes" % (nclass))
#define network
tf.set_random_seed(0)
n_input = dim
n_output = nclass
if use_gray:
weights = {
'wd1': tf.Variable(tf.random_normal(
[(int)(imgsize[0]*imgsize[1]), 128], stddev=0.1),name="wd1"),
'wd2': tf.Variable(tf.random_normal([128, n_output], stddev=0.1),name="wd2")
}
else:
print "You should use gray images!!"
biases = {
'bd1': tf.Variable(tf.random_normal([128], stddev=0.1),name="bd1"),
'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1),name="bd2")
}
def conv_basic(_input, _w, _b, _keepratio, _use_gray):
# INPUT
if _use_gray:
_input_r = tf.reshape(_input, shape=[-1, imgsize[0], imgsize[1], 1])
else:
_input_r = tf.reshape(_input, shape=[-1, imgsize[0], imgsize[1], 3])
# VECTORIZE
_dense1 = tf.reshape(_input_r
, [-1, _w['wd1'].get_shape().as_list()[0]])
# FULLY CONNECTED LAYER 1
_fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
_fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
# FULLY CONNECTED LAYER 2
_out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
# RETURN
out = {
'out': _out
}
return out
print ("NETWORK READY")
#define functions
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)
# Functions!
_pred = conv_basic(x, weights, biases, keepratio, use_gray)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))
WEIGHT_DECAY_FACTOR = 0.0001
l2_loss = tf.add_n([tf.nn.l2_loss(v)
for v in tf.trainable_variables()])
cost = cost + WEIGHT_DECAY_FACTOR*l2_loss
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
init = tf.initialize_all_variables()
print ("FUNCTIONS READY")
#optimize
# Parameters
training_epochs = 400
batch_size = 100
display_step = 1
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
#Saver
save_step = 1;
#savedir = "nets/"
saver = tf.train.Saver(max_to_keep=3)
print('Start time: {:[%H:%M:%S]}'.format(datetime.datetime.now()))
for epoch in range(training_epochs):
avg_cost = 0.
num_batch = int(ntrain/batch_size)+1
# Loop over all batches
for i in range(num_batch):
randidx = np.random.randint(ntrain, size=batch_size)
batch_xs = trainimg[randidx, :]
batch_ys = trainlabel[randidx, :]
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys
, keepratio:0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys
, keepratio:1.})/num_batch
# Display logs per epoch step
if epoch % display_step == 0 or epoch == training_epochs-1:
print ('{:[%H:%M:%S] }'.format(datetime.datetime.now())+"Epoch: %03d/%03d cost: %.9f" %
(epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs
, y: batch_ys, keepratio:1.})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: testimg
, y: testlabel, keepratio:1.})
print (" Test accuracy: %.3f" % (test_acc))
#save epoch
if epoch % save_step == 0:
saver.save(sess, cwd + "/nets/signs_fc.ckpt-" + str(epoch))
print ("Optimization Finished!")
sess.close()
print ("Session closed.")
|
mit
|
samzhang111/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
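# Hypothetical usage sketch: the class labels and probabilities below are example
# values, chosen so that each probability vector sums to exactly 1.0 as
# random_choice_csc requires.
if __name__ == '__main__':
    example_classes = [np.array([0, 1, 2]), np.array([0, 3])]
    example_probabilities = [np.array([0.5, 0.25, 0.25]), np.array([0.75, 0.25])]
    demo = random_choice_csc(n_samples=8, classes=example_classes,
                             class_probability=example_probabilities,
                             random_state=0)
    print(demo.toarray())  # one column per output; zeros are stored implicitly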
|
bsd-3-clause
|
MarianZoll/pythonFlaskWebApp
|
inspirationCode.py
|
1
|
3085
|
# Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, session
from flask import jsonify
from flask import request
from flask import render_template
from flask import redirect
from flask import url_for
from flask import make_response
#from wtforms import Form, BooleanField, StringField, PasswordField, validators
import subprocess
from subprocess import call
import shlex
import os
import sys
import random
import tempfile
import numpy as np
import pandas as pd
from werkzeug.utils import secure_filename
from sklearn import datasets, linear_model
import ibm_db
import csv
import requests
import json
import cPickle
import statistics
app = Flask(__name__)
port = os.getenv('PORT', '5000')
@app.route('/_add_numbers', methods=['GET', 'POST'])
def newDataTuple():
"""Add two numbers server side, ridiculous but well..."""
new_x = np.random.uniform(0, 5, 1)
new_y = 10 + 20 * new_x + np.random.normal(0, 1, 1)
return jsonify(new_x, new_y)
URL = "http://www.nasdaq.com/quotes/nasdaq-100-stocks.aspx?render=download"
def get_data():
r = requests.get(URL)
data = r.text
RESULTS = {'children': []}
for line in csv.DictReader(data.splitlines(), skipinitialspace=True):
RESULTS['children'].append({
'name': line['Name'],
'symbol': line['Symbol'],
'price': line['lastsale'],
'net_change': line['netchange'],
'percent_change': line['pctchange'],
'volume': line['share_volume'],
'value': line['Nasdaq100_points']
})
return RESULTS
@app.route('/stockD3', methods=['GET', 'POST'])
def getData():
if 'username' in session:
return jsonify(get_data())
return app.send_static_file('login.html')
@app.route('/predictive')
def Predictive():
url = 'https://palbyp.pmservice.ibmcloud.com/pm/v1/score/drug1N?accesskey=PvbqNXIACpuDrZgDO9CbN313mSWPg+nXVvIJ0djB4yBSYaR6o+dv7VoBAlI2K7g2HxGxQ3pIogjgEOjN0TGDTcL0h32gVzPkwMbmHXNpi+HzJU8iWyWJqltymsLLIPLgweQ+A7l5lC3CZwLgaXdGr4LuDCZcgdyslsDbxYIQ5EY='
payload = {"tablename":"scoreInput",
"header":["Age","Sex","BP","Cholesterol","Na","K","Drug"],
"data":[[43.0, "M", "HIGH", "HIGH", 0.87, 0.87, "drugY"]]}
r = requests.post(url, json=payload)
return r.text + ' Status Code: ' + str(r.status_code)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), debug=True)
|
apache-2.0
|
huobaowangxi/scikit-learn
|
examples/mixture/plot_gmm.py
|
248
|
2817
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/preprocessing/tests/test_data.py
|
14
|
37957
|
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
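    # As an illustrative aside (assuming numpy's default linear interpolation):
    # np.percentile([0.5, -0.1, 1.1], [25, 50, 75]) gives roughly (0.2, 0.5, 0.8),
    # so the third column is scaled as (x - 0.5) / (0.8 - 0.2), which yields the
    # 0.0, -1.0 and +1.0 values expected below.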
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise an error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
|
bsd-3-clause
|
schinmayee/metric-learning
|
view_query.py
|
1
|
1778
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.misc import imread, imresize
import argparse
def ImshowNoax(img, normalize=True):
""" Tiny helper to show images as uint8 and remove axis labels """
if normalize:
img_max, img_min = np.max(img), np.min(img)
img = 255.0 * (img - img_min) / (img_max - img_min)
plt.imshow(img.astype('uint8'))
plt.gca().axis('off')
def SaveQuery(base_dir, line, result_img):
tokens = line.split(',')
if (len(tokens)!=7):
print('Skipping ' + line)
return
for i in range(6):
im_meta = tokens[i].split(':')
im_path = im_meta[0].strip()
im_class = im_meta[1].strip()
im = imread(os.path.join(base_dir, im_path))
plt.subplot(2,3,(i+3)%6+1)
ImshowNoax(im, normalize=False)
plt.title('as:\n'.join(im_class.split('->')))
plt.tight_layout(pad=0.001)
plt.savefig(result_img)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='View query results')
parser.add_argument('--queries', type=str, default='',
help='file with query results')
parser.add_argument('--images', type=str, default='',
help='base directory for images')
parser.add_argument('--results', type=str, default='',
help='directory to save results to')
args = parser.parse_args()
queries = args.queries
base_dir = args.images
results_dir = args.results
font = { 'size' : 6 }
matplotlib.rc('font', **font)
lines = open(queries).read()
qnum = 1
for line in lines.split('\n'):
SaveQuery(base_dir, line, os.path.join(results_dir, 'query_%03d.png' % qnum))
qnum += 1
|
mit
|
kcavagnolo/astroML
|
book_figures/chapter2/fig_search_scaling.py
|
3
|
2847
|
"""
Search Algorithm Scaling
------------------------
Figure 2.1.
The scaling of two methods to search for an item in an ordered list: a linear
method which performs a comparison on all N items, and a binary search which
uses a more sophisticated algorithm. The theoretical scalings are shown by
dashed lines.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from time import time
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Compute the execution times as a function of array size
Nsamples = 10 ** np.linspace(6.0, 7.8, 17)
time_linear = np.zeros_like(Nsamples)
time_binary = np.zeros_like(Nsamples)
for i in range(len(Nsamples)):
# create a sorted array
x = np.arange(Nsamples[i], dtype=int)
# Linear search: choose a single item in the array
item = int(0.4 * Nsamples[i])
t0 = time()
j = np.where(x == item)
t1 = time()
time_linear[i] = t1 - t0
# Binary search: this is much faster, so choose 1000 items to search for
items = np.linspace(0, Nsamples[i], 1000).astype(int)
t0 = time()
j = np.searchsorted(x, items)
t1 = time()
time_binary[i] = (t1 - t0)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(bottom=0.15)
ax = plt.axes(xscale='log', yscale='log')
ax.grid()
# plot the observed times
ax.plot(Nsamples, time_linear, 'ok', color='gray', markersize=5,
label=r'linear search $(\mathcal{O}[N])$')
ax.plot(Nsamples, time_binary, 'sk', color='gray', markersize=5,
label=r'efficient search $(\mathcal{O}[\log N])$')
# plot the expected scaling
scale = 10 ** np.linspace(5, 8, 100)
scaling_N = scale * time_linear[7] / Nsamples[7]
scaling_logN = np.log(scale) * time_binary[7] / np.log(Nsamples[7])
ax.plot(scale, scaling_N, '--k')
ax.plot(scale, scaling_logN, '--k')
ax.set_xlim(9E5, 1E8)
# add text and labels
ax.set_title("Scaling of Search Algorithms")
ax.set_xlabel('Length of Array')
ax.set_ylabel('Relative search time')
ax.legend(loc='upper left')
plt.show()
|
bsd-2-clause
|
joernhees/scikit-learn
|
examples/calibration/plot_calibration.py
|
66
|
4795
|
"""
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of the predicted
probabilities is often desirable as a postprocessing step. This example
illustrates two different methods for this calibration and evaluates the
quality of the returned probabilities using the Brier score
(see https://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
|
bsd-3-clause
|
oaelhara/numbbo
|
code-postprocessing/bbob_pproc/pprldistr.py
|
1
|
35730
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""For generating empirical cumulative distribution function figures.
The outputs show empirical cumulative distribution functions (ECDFs) of
the running times of trials. These ECDFs show on the y-axis the fraction
of cases for which the running time (left subplots) or the df-value
(right subplots) was smaller than the value given on the x-axis. On the
left, ECDFs of the running times from trials are shown for different
target values. Light brown lines in the background show ECDFs for target
value 1e-8 of all algorithms benchmarked during BBOB-2009. On the right,
ECDFs of df-values from all trials are shown for different numbers of
function evaluations.
**Example**
.. plot::
:width: 75%
import urllib
import tarfile
import glob
from pylab import *
import bbob_pproc as bb
# Collect and unarchive data (3.4MB)
dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/BIPOP-CMA-ES.tar.gz'
filename, headers = urllib.urlretrieve(dataurl)
archivefile = tarfile.open(filename)
archivefile.extractall()
# Empirical cumulative distribution function figure
ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f0*_20.pickle'))
figure()
bb.pprldistr.plot(ds)
bb.pprldistr.beautify() # resize the window to view whole figure
CAVEAT: the naming conventions in this module mix up ERT (an estimate
of the expected running time) and run lengths.
"""
from __future__ import absolute_import
import os
import warnings # I don't know what I am doing here
import numpy as np
import pickle, gzip
import matplotlib.pyplot as plt
from pdb import set_trace
from . import toolsstats, genericsettings, pproc
from .ppfig import consecutiveNumbers, plotUnifLogXMarkers, saveFigure, logxticks
from .pptex import color_to_latex, marker_to_latex
single_runlength_factors = [0.5, 1.2, 3, 10] + [10 ** i for i in range(2, 12)]
# TODO: the method names in this module seem to be overly unclear or misleading and should be revised.
refcolor = 'wheat'
nbperdecade = 1 # markers in x-axis decades in ecdfs
runlen_xlimits_max = None # is possibly manipulated in config
runlen_xlimits_min = 1 # set to 10**-0.5 in runlength case in config
# Used as a global to store the largest xmax and align the FV ECD figures.
fmax = None
evalfmax = runlen_xlimits_max # is manipulated/stored in this module
# TODO: the target function values and the styles of the line only make sense
# together. Therefore we should either:
# 1. keep the targets as input argument and make rldStyles depend on them or
# 2. remove the targets as input argument and put them here.
rldStyles = ({'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'r', 'linewidth': 3.},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r'},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r', 'linewidth': 3.})
rldUnsuccStyles = (
{'color': 'c', 'ls': '-'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k'},
{'color': 'c', 'ls': '-'},
{'color': 'm'},
{'color': 'k'},
) # should not be too short
styles = genericsettings.line_styles
caption_part_one = r"""%
Empirical cumulative distribution functions (ECDF), plotting the fraction of
trials with an outcome not larger than the respective value on the $x$-axis.
#1"""
caption_left_fixed_targets = r"""%
Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ with $\Df=10^{k}$, where $k$ is the first value in the legend.
The thick red line represents the most difficult target value $\fopt+10^{-8}$. """
caption_left_rlbased_targets = r"""%
Left subplots: ECDF of number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ where \Df\ is the
target just not reached by the GECCO-BBOB-2009 best algorithm within a budget of
% largest $\Df$-value $\ge10^{-8}$ for which the best \ERT\ seen in the GECCO-BBOB-2009 was yet above
$k\times\DIM$ evaluations, where $k$ is the first value in the legend. """
caption_wrap_up = r"""%
Legends indicate for each target the number of functions that were solved in at
least one trial within the displayed budget."""
caption_right = r"""%
Right subplots: ECDF of the
best achieved $\Df$
for running times of TO_BE_REPLACED
function evaluations
(from right to left cycling cyan-magenta-black\dots) and final $\Df$-value (red),
where \Df\ and \textsf{Df} denote the difference to the optimal function value.
Light brown lines in the background show ECDFs for the most difficult target of all
algorithms benchmarked during BBOB-2009."""
caption_single_fixed = caption_part_one + caption_left_fixed_targets + caption_wrap_up + caption_right
caption_single_rlbased = caption_part_one + caption_left_rlbased_targets + caption_wrap_up + caption_right
caption_two_part_one = r"""%
Empirical cumulative distributions (ECDF)
of run lengths and speed-up ratios in 5-D (left) and 20-D (right).
Left sub-columns: ECDF of
the number of function evaluations divided by dimension $D$
(FEvals/D) """
symbAlgorithmA = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[0]['marker']))
symbAlgorithmB = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[1]['marker']))
caption_two_fixed_targets_part1 = r"""%
to reach a target value $\fopt+\Df$ with $\Df=10^{k}$, where
$k\in\{1, -1, -4, -8\}$ is given by the first value in the legend, for
\algorithmA\ ("""
caption_two_fixed_targets_part2 = r""") and \algorithmB\ ("""
caption_two_fixed_targets_part3 = r""")%
. Light beige lines show the ECDF of FEvals for target value $\Df=10^{-8}$
of all algorithms benchmarked during BBOB-2009.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB for target
function values $10^k$ with $k$ given in the legend; all
trial pairs for each function. Pairs where both trials failed are disregarded,
pairs where one trial failed are visible in the limits being $>0$ or $<1$. The
legend also indicates, after the colon, the number of functions that were
solved in at least one trial (\algorithmA\ first)."""
caption_two_rlbased_targets_part1 = r"""%
to fall below $\fopt+\Df$ for
\algorithmA\ ("""
caption_two_rlbased_targets_part2 = r""") and \algorithmB\ ("""
caption_two_rlbased_targets_part3 = r"""%
) where \Df\ is the target just not reached by the GECCO-BBOB-2009 best
algorithm within a budget of $k\times\DIM$ evaluations, with $k$ being the
value in the legend.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB\ for
run-length-based targets; all trial pairs for each function. Pairs where
both trials failed are disregarded, pairs where one trial failed are visible
in the limits being $>0$ or $<1$. The legends indicate the target budget of
$k\times\DIM$ evaluations and, after the colon, the number of functions that
were solved in at least one trial (\algorithmA\ first)."""
caption_two_fixed = (caption_two_part_one
+ caption_two_fixed_targets_part1
+ symbAlgorithmA
+ caption_two_fixed_targets_part2
+ symbAlgorithmB
+ caption_two_fixed_targets_part3)
caption_two_rlbased = (caption_two_part_one
+ caption_two_rlbased_targets_part1
+ symbAlgorithmA
+ caption_two_rlbased_targets_part2
+ symbAlgorithmB
+ caption_two_rlbased_targets_part3)
previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'
previous_RLBdata_filename = 'pprldistr2009_hardestRLB.pickle.gz'
previous_data_filename = os.path.join(os.path.split(__file__)[0], previous_data_filename)
previous_RLBdata_filename = os.path.join(os.path.split(__file__)[0], previous_RLBdata_filename)
previous_data_dict = None
previous_RLBdata_dict = None
def load_previous_data(filename = previous_data_filename, force = False):
if previous_data_dict and not force:
return previous_data_dict
try:
# cocofy(previous_data_filename)
        f = gzip.open(filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
previous_algorithm_data_found = False
        print 'Could not find file: ', filename
else:
f.close()
return None
def load_previous_RLBdata(filename = previous_RLBdata_filename):
if previous_RLBdata_dict:
return previous_RLBdata_dict
try:
        f = gzip.open(filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
        print 'Could not find file: ', filename
else:
f.close()
return None
def caption_single(max_evals_div_dim):
caption = caption_single_rlbased if genericsettings.runlength_based_targets else caption_single_fixed
return caption.replace(r'TO_BE_REPLACED', '$' + 'D, '.join([str(i) for i in single_runlength_factors[:6]]) + 'D,\dots$')
def caption_two():
caption = caption_two_rlbased if genericsettings.runlength_based_targets else caption_two_fixed
return caption
def beautifyECDF():
"""Generic formatting of ECDF figures."""
plt.ylim(-0.0, 1.01) # was plt.ylim(-0.01, 1.01)
plt.yticks(np.arange(0., 1.001, 0.2)) # , ('0.0', '', '0.5', '', '1.0'))
plt.grid(True)
xmin, xmax = plt.xlim()
# plt.xlim(xmin=xmin*0.90) # why this?
c = plt.gca().get_children()
for i in c: # TODO: we only want to extend ECDF lines...
try:
if i.get_drawstyle() == 'steps' and not i.get_linestyle() in ('', 'None'):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# xdata = np.hstack((xmin, xdata))
# ydata = np.hstack((ydata[0], ydata))
if xmax > max(xdata):
xdata = np.hstack((xdata, xmax))
ydata = np.hstack((ydata, ydata[-1]))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
elif (i.get_drawstyle() == 'steps' and i.get_marker() != '' and
i.get_linestyle() in ('', 'None')):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# minidx = np.ceil(np.log10(xmin) * nbperdecade)
# maxidx = np.floor(np.log10(xdata[0]) * nbperdecade)
# x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
# xdata = np.hstack((x, xdata))
# ydata = np.hstack(([ydata[0]] * len(x), ydata))
if xmax > max(xdata):
minidx = np.ceil(np.log10(xdata[-1]) * nbperdecade)
maxidx = np.floor(np.log10(xmax) * nbperdecade)
x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
xdata = np.hstack((xdata, x))
ydata = np.hstack((ydata, [ydata[-1]] * len(x)))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
except (AttributeError, IndexError):
pass
def beautifyRLD(xlimit_max = None):
"""Format and save the figure of the run length distribution.
After calling this function, changing the boundaries of the figure
will not update the ticks and tick labels.
"""
a = plt.gca()
a.set_xscale('log')
a.set_xlabel('log10 of FEvals / DIM')
a.set_ylabel('proportion of trials')
logxticks()
if xlimit_max:
plt.xlim(xmax = xlimit_max ** 1.0) # was 1.05
plt.xlim(xmin = runlen_xlimits_min)
plt.text(plt.xlim()[0],
plt.ylim()[0],
genericsettings.current_testbed.pprldistr_target_values.short_info,
fontsize = 14)
beautifyECDF()
def beautifyFVD(isStoringXMax = False, ylabel = True):
"""Formats the figure of the run length distribution.
This function is to be used with :py:func:`plotFVDistr`
    :param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the global
:py:data:`fmax` and all subsequent call
will have the same maximum xlim
:param bool ylabel: if True, y-axis will be labelled.
"""
a = plt.gca()
a.set_xscale('log')
if isStoringXMax:
global fmax
else:
fmax = None
if not fmax:
xmin, fmax = plt.xlim()
plt.xlim(1.01e-8, fmax) # 1e-8 was 1.
# axisHandle.invert_xaxis()
a.set_xlabel('log10 of Df') # / Dftarget
if ylabel:
a.set_ylabel('proportion of trials')
logxticks(limits=plt.xlim())
beautifyECDF()
if not ylabel:
a.set_yticklabels(())
def plotECDF(x, n = None, **plotArgs):
"""Plot an empirical cumulative distribution function.
:param seq x: data
:param int n: number of samples, if not provided len(x) is used
:param plotArgs: optional keyword arguments provided to plot.
:returns: handles of the plot elements.
"""
if n is None:
n = len(x)
nx = len(x)
if n == 0 or nx == 0:
res = plt.plot([], [], **plotArgs)
else:
x = sorted(x) # do not sort in place
x = np.hstack((x, x[-1]))
y = np.hstack((np.arange(0., nx) / n, float(nx) / n))
res = plotUnifLogXMarkers(x, y, nbperdecade = nbperdecade,
drawstyle = 'steps', **plotArgs)
return res
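# A minimal usage sketch for plotECDF (added for illustration; the data and
# keyword arguments below are made up, and keyword arguments are simply
# forwarded to the underlying plot call as documented above):
#
#   >>> import numpy as np
#   >>> import matplotlib.pyplot as plt
#   >>> runlengths = np.array([12., 30., 45., 45., 200.])
#   >>> plotECDF(runlengths, n=8, color='k')  # n=8 models 3 extra unsuccessful runs
#   >>> plt.xscale('log'); plt.show()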
def _plotERTDistr(dsList, target, **plotArgs):
"""This method is obsolete, should be removed? The replacement for simulated runlengths is in pprldmany?
Creates simulated run time distributions (it is not an ERT distribution) from a DataSetList.
:keyword DataSet dsList: Input data sets
:keyword dict target: target precision
:keyword plotArgs: keyword arguments to pass to plot command
:return: resulting plot.
Details: calls ``plotECDF``.
"""
x = []
nn = 0
samplesize = genericsettings.simulated_runlength_bootstrap_sample_size # samplesize should be at least 1000
percentiles = 0.5 # could be anything...
for i in dsList:
# funcs.add(i.funcId)
for j in i.evals:
if j[0] <= target[i.funcId]:
runlengthsucc = j[1:][np.isfinite(j[1:])]
runlengthunsucc = i.maxevals[np.isnan(j[1:])]
tmp = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
percentiles = percentiles,
samplesize = samplesize)
x.extend(tmp[1])
break
nn += samplesize
res = plotECDF(x, nn, **plotArgs)
return res
def _plotRLDistr_old(dsList, target, **plotArgs):
"""Creates run length distributions from a sequence dataSetList.
    Labels of the line (for the legend) will be set automatically with
    the following format: ``'%+d: %d/%d' % (log10(target), nb_solved, nb_funcs)``.
:param DataSetList dsList: Input data sets
:param dict or float target: target precision
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for i in dsList:
funcs.add(i.funcId)
try:
target = target[i.funcId] # TODO: this can only work for a single function, generally looks like a bug
if not genericsettings.test:
print 'target:', target
print 'function:', i.funcId
raise Exception('please check this, it looks like a bug')
except TypeError:
target = target
tmp = i.detEvals((target,))[0] / i.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0:
fsolved.add(i.funcId)
x.extend(tmp)
nn += i.nbRuns()
kwargs = plotArgs.copy()
label = ''
try:
label += '%+d:' % (np.log10(target))
except NameError:
pass
label += '%d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def erld_data(dsList, target, max_fun_evals = np.inf):
"""return ``[sorted_runlengths_divided_by_dimension, nb_of_all_runs, functions_ids_found, functions_ids_solved]``
`max_fun_evals` is only used to compute `function_ids_solved`,
that is elements in `sorted_runlengths...` can be larger.
copy-paste from `plotRLDistr` and not used.
"""
runlength_data = []
nruns = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
evals = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
evals = evals[np.isnan(evals) == False] # keep only success
if len(evals) > 0 and sum(evals <= max_fun_evals):
fsolved.add(ds.funcId)
runlength_data.extend(evals)
nruns += ds.nbRuns()
return sorted(runlength_data), nruns, funcs, fsolved
def plotRLDistr(dsList, target, label = '', max_fun_evals = np.inf,
**plotArgs):
"""Creates run length distributions from a sequence dataSetList.
    Labels of the line (for the legend) will be appended with the number
    of functions solved at least once.
:param DataSetList dsList: Input data sets
:param target: a method that delivers single target values like ``target((fun, dim))``
:param str label: target value label to be displayed in the legend
:param max_fun_evals: only used to determine success on a single function
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
Example::
plotRLDistr(dsl, lambda f: 1e-6)
Details: ``target`` is a function taking a (function_number, dimension) pair
as input and returning a ``float``. It can be defined as
``lambda fun_dim: targets(fun_dim)[j]`` returning the j-th element of
``targets(fun_dim)``, where ``targets`` is an instance of
``class pproc.TargetValues`` (see the ``pproc.TargetValues.__call__`` method).
TODO: data generation and plotting should be in separate methods
    TODO: a different number of runs/data points biases the results; shouldn't
    the number of data points be made the same in that case?
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
tmp = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0 and sum(tmp <= max_fun_evals):
fsolved.add(ds.funcId)
x.extend(tmp)
nn += ds.nbRuns()
kwargs = plotArgs.copy()
label += ': %d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def plotFVDistr(dsList, budget, min_f = 1e-8, **plotArgs):
"""Creates ECDF of final function values plot from a DataSetList.
:param dsList: data sets
:param min_f: used for the left limit of the plot
:param float budget: maximum evaluations / dimension that "count"
:param plotArgs: additional arguments passed to plot
:returns: handle
"""
x = []
nn = 0
for ds in dsList:
for i, fvals in enumerate(ds.funvals):
if fvals[0] > budget * ds.dim:
                assert i > 0, 'first entry ' + str(fvals[0]) + ' already exceeds the maximal budget ' + str(budget * ds.dim)
fvals = ds.funvals[i - 1]
break
# vals = fvals[1:].copy() / target[i.funcId]
vals = fvals[1:].copy()
# replace negative values to prevent problem with log of vals
vals[vals <= 0] = min(np.append(vals[vals > 0], [min_f])) # works also when vals[vals > 0] is empty
if genericsettings.runlength_based_targets:
            raise NotImplementedError('related function vals with respective budget (e.g. ERT(val)) see pplogloss.generateData()')
x.extend(vals)
nn += ds.nbRuns()
if nn > 0:
return plotECDF(x, nn, **plotArgs)
else:
return None
def comp(dsList0, dsList1, targets, isStoringXMax = False,
outputdir = '', info = 'default', verbose = True):
"""Generate figures of ECDF that compare 2 algorithms.
:param DataSetList dsList0: list of DataSet instances for ALG0
:param DataSetList dsList1: list of DataSet instances for ALG1
:param seq targets: target function values to be displayed
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the globals
:py:data:`fmax` and :py:data:`maxEvals`
and all subsequent calls will use these
values as rightmost xlim in the generated
figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
targets = pproc.TargetValues.cast(targets)
dictdim0 = dsList0.dictByDim()
dictdim1 = dsList1.dictByDim()
for d in set(dictdim0.keys()) & set(dictdim1.keys()):
maxEvalsFactor = max(max(i.mMaxEvals() / d for i in dictdim0[d]),
max(i.mMaxEvals() / d for i in dictdim1[d]))
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor ** 1.05
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
tmp = plotRLDistr(dictdim0[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[1]['marker'],
**rldStyles[j % len(rldStyles)])
plt.setp(tmp[-1], label = None) # Remove automatic legend
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 20.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
tmp = plotRLDistr(dictdim1[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[0]['marker'],
**rldStyles[j % len(rldStyles)])
            # modify the automatic legend: remove the marker and change the text
plt.setp(tmp[-1], marker = '',
label = targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j))
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 15.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
funcs = set(i.funcId for i in dictdim0[d]) | set(i.funcId for i in dictdim1[d])
text = consecutiveNumbers(sorted(funcs), 'f')
if not dsList0.isBiobjective():
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim0[d]), ls='--', color='k')
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim1[d]), color='k')
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim0[d]),
marker = '+', markersize = 20., color = 'k',
markeredgewidth = plt.getp(tmp[-1], 'linewidth',))
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim1[d]),
marker = 'o', markersize = 15., color = 'k', markerfacecolor = 'None',
markeredgewidth = plt.getp(tmp[-1], 'linewidth'))
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
def beautify():
"""Format the figure of the run length distribution.
Used in conjunction with plot method (obsolete/outdated, see functions ``beautifyFVD`` and ``beautifyRLD``).
"""
# raise NotImplementedError('this implementation is obsolete')
plt.subplot(121)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
axisHandle.set_xlabel('log10 of FEvals / DIM')
axisHandle.set_ylabel('proportion of trials')
# Grid options
logxticks()
beautifyECDF()
plt.subplot(122)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
xmin, fmax = plt.xlim()
plt.xlim(1., fmax)
axisHandle.set_xlabel('log10 of Df / Dftarget')
beautifyECDF()
logxticks()
axisHandle.set_yticklabels(())
plt.gcf().set_size_inches(16.35, 6.175)
# try:
# set_trace()
# plt.setp(plt.gcf(), 'figwidth', 16.35)
# except AttributeError: # version error?
# set_trace()
# plt.setp(plt.gcf(), 'figsize', (16.35, 6.))
def plot(dsList, targets=None, **plotArgs):
"""Plot ECDF of evaluations and final function values
in a single figure for demonstration purposes."""
# targets = targets() # TODO: this needs to be rectified
# targets = targets.target_values
dsList = pproc.DataSetList(dsList)
assert len(dsList.dictByDim()) == 1, ('Cannot display different '
'dimensionalities together')
res = []
if not targets:
targets = genericsettings.current_testbed.ppfigdim_target_values
plt.subplot(121)
maxEvalsFactor = max(i.mMaxEvals() / i.dim for i in dsList)
evalfmax = maxEvalsFactor
for j in range(len(targets)):
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotRLDistr(dsList, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
res.extend(tmp)
res.append(plt.axvline(x = maxEvalsFactor, color = 'k', **plotArgs))
funcs = list(i.funcId for i in dsList)
text = consecutiveNumbers(sorted(funcs), 'f')
res.append(plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes))
plt.subplot(122)
for j in [range(len(targets))[-1]]:
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotFVDistr(dsList, evalfmax, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
if tmp:
res.extend(tmp)
tmp = np.floor(np.log10(evalfmax))
# coloring right to left:
maxEvalsF = np.power(10, np.arange(0, tmp))
for j in range(len(maxEvalsF)):
tmpplotArgs = dict(plotArgs, **rldUnsuccStyles[j % len(rldUnsuccStyles)])
tmp = plotFVDistr(dsList, maxEvalsF[j], lambda fun_dim: targets(fun_dim)[-1], **tmpplotArgs)
if tmp:
res.extend(tmp)
res.append(plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes))
return res
def plot_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_data_dict
if previous_data_dict is None:
previous_data_dict = load_previous_data() # this takes about 6 seconds
if previous_data_dict is not None:
for alg in previous_data_dict:
x = []
nn = 0
try:
tmp = previous_data_dict[alg]
for f in funcs:
                    tmp[f][dim]  # simply test that they exist
except KeyError:
continue
for f in funcs:
tmp2 = tmp[f][dim][0][1:]
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def plotRLB_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_RLBdata_dict
if previous_RLBdata_dict is None:
previous_RLBdata_dict = load_previous_RLBdata()
if previous_RLBdata_dict is not None:
for alg in previous_RLBdata_dict:
x = []
nn = 0
try:
tmp = previous_RLBdata_dict[alg]
for f in funcs:
                    tmp[f][dim]  # simply test that they exist
except KeyError:
continue
for f in funcs:
tmp2 = np.array(tmp[f][dim][0][1:][0])
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def main(dsList, isStoringXMax = False, outputdir = '',
info = 'default', verbose = True):
"""Generate figures of empirical cumulative distribution functions.
    This method can keep the same boundaries for the x-axis across
    calls if ``isStoringXMax==True``. This makes sense when dealing
    with different functions or subsets of functions for one given
    dimension.
    CAVEAT: this is bug-prone, as some data depend on the maximum
    number of evaluations and the appearance therefore depends on
    the calling order.
:param DataSetList dsList: list of DataSet instances to process.
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the
globals :py:data:`fmax` and
:py:data:`maxEvals` and all subsequent
calls will use these values as rightmost
xlim in the generated figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
targets = genericsettings.current_testbed.pprldistr_target_values # convenience abbreviation
for d, dictdim in dsList.dictByDim().iteritems():
maxEvalsFactor = max(i.mMaxEvals() / d for i in dictdim)
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
# first figure: Run Length Distribution
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
plotRLDistr(dictdim,
lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
evalfmax, # can be larger maxEvalsFactor with no effect
** rldStyles[j % len(rldStyles)])
funcs = list(i.funcId for i in dictdim)
text = '{%s}, %d-D' % (consecutiveNumbers(sorted(funcs), 'f'), d)
if not dsList.isBiobjective():
# try:
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
# if targets.target_values[-1] == 1e-8: # this is a hack
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# except:
# pass
plt.axvline(x = maxEvalsFactor, color = 'k') # vertical line at maxevals
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top",
transform = plt.gca().transAxes
# bbox=dict(ec='k', fill=False)
)
try: # was never tested, so let's make it safe
if len(funcs) == 1:
plt.title(genericsettings.current_testbed.info(funcs[0])[:27])
except:
warnings.warn('could not print title')
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
# second figure: Function Value Distribution
filename = os.path.join(outputdir, 'ppfvdistr_%02dD_%s' % (d, info))
fig = plt.figure()
plotFVDistr(dictdim, np.inf, 1e-8, **rldStyles[-1])
# coloring right to left
for j, max_eval_factor in enumerate(single_runlength_factors):
if max_eval_factor > maxEvalsFactor:
break
plotFVDistr(dictdim, max_eval_factor, 1e-8,
**rldUnsuccStyles[j % len(rldUnsuccStyles)])
plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyFVD(isStoringXMax = isStoringXMax, ylabel = False)
saveFigure(filename, verbose = verbose)
plt.close(fig)
# plt.rcdefaults()
|
bsd-3-clause
|
wscullin/spack
|
var/spack/repos/builtin/packages/espressopp/package.py
|
2
|
3323
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Espressopp(CMakePackage):
"""ESPResSo++ is an extensible, flexible, fast and parallel simulation
software for soft matter research. It is a highly versatile software
package for the scientific simulation and analysis of coarse-grained
    atomistic or bead-spring models as they are used in soft matter research.
"""
homepage = "https://espressopp.github.io"
url = "https://github.com/espressopp/espressopp/tarball/v1.9.4.1"
version('develop', git='https://github.com/espressopp/espressopp.git', branch='master')
version('1.9.4.1', '0da74a6d4e1bfa6a2a24fca354245a4f')
version('1.9.4', 'f2a27993a83547ad014335006eea74ea')
variant('ug', default=False, description='Build user guide')
variant('pdf', default=False, description='Build user guide in pdf format')
variant('dg', default=False, description='Build developer guide')
depends_on("[email protected]:", type='build')
depends_on("mpi")
depends_on("boost+serialization+filesystem+system+python+mpi", when='@1.9.4:')
extends("python")
depends_on("python@2:2.8")
depends_on("[email protected]:", when='@1.9.4', type=('build', 'run'))
depends_on("[email protected]:", when='@1.9.4.1:', type=('build', 'run'))
depends_on("fftw")
depends_on("py-sphinx", when="+ug", type='build')
depends_on("py-sphinx", when="+pdf", type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', when="+ug", type='build')
depends_on('py-matplotlib', when="+pdf", type='build')
depends_on("texlive", when="+pdf", type='build')
depends_on("doxygen", when="+dg", type='build')
def cmake_args(self):
return ['-DEXTERNAL_MPI4PY=ON', '-DEXTERNAL_BOOST=ON']
def build(self, spec, prefix):
with working_dir(self.build_directory):
make()
if '+ug' in spec:
make("ug", parallel=False)
if '+pdf' in spec:
make("ug-pdf", parallel=False)
if '+dg' in spec:
make("doc", parallel=False)
|
lgpl-2.1
|
JosmanPS/scikit-learn
|
sklearn/decomposition/nmf.py
|
100
|
19059
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
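# Worked example (added for illustration): for x = [1, 0, 0, 0] both norms are 1
# and sqrt(n) = 2, so _sparseness(x) = (2 - 1) / (2 - 1) = 1 (maximally sparse);
# for x = [1, 1, 1, 1] the L1/L2 ratio equals sqrt(n) = 2, so _sparseness(x) = 0.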
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in the output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
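# A small sketch of calling the initializer directly (added for illustration;
# the data are arbitrary non-negative values and the helper is normally used
# through the estimator below):
#
#   >>> import numpy as np
#   >>> X = np.abs(np.random.RandomState(0).randn(6, 5))
#   >>> W, H = _initialize_nmf(X, n_components=2, variant='a', random_state=0)
#   >>> W.shape, H.shape
#   ((6, 2), (2, 5))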
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
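# A hedged sketch of the subproblem solver in isolation (illustrative random
# data only): for fixed V and W it returns a non-negative H such that
# np.dot(W, H) approximates V.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> V, W = np.abs(rng.randn(4, 3)), np.abs(rng.randn(4, 2))
#   >>> H, grad, n_iter = _nls_subproblem(V, W, np.abs(rng.randn(2, 3)),
#   ...                                   tol=1e-4, max_iter=200)
#   >>> bool((H >= 0).all())
#   True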
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
        Number of components. If n_components is not set, all components
        are kept.
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
MTG/sms-tools
|
lectures/06-Harmonic-model/plots-code/sines-partials-harmonics.py
|
24
|
2020
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(mX1.size)/float(N), mX1-max(mX1), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX1), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -80, 4])
plt.title('mX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX2.size)/float(N), mX2-max(mX2), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX2), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,-100,4])
plt.title('mX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(mX3.size)/float(N), mX3-max(mX3), 'r', lw=1.5)
plt.plot(fs * iploc/N, ipmag-max(mX3), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,-70,2])
plt.title('mX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics.png')
plt.show()
|
agpl-3.0
|
blondegeek/pymatgen
|
pymatgen/util/plotting.py
|
3
|
20879
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for generating nicer plots.
"""
import math
import numpy as np
from itertools import combinations
from pymatgen.core.periodic_table import Element
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 13, 2012"
def pretty_plot(width=8, height=None, plt=None, dpi=None,
color_cycle=("qualitative", "Set1_9")):
"""
Provides a publication quality plot, with nice defaults for font sizes etc.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made to an
existing plot. Otherwise, a new plot will be created.
dpi (int): Sets dot per inch for figure. Defaults to 300.
color_cycle (tuple): Set the color cycle for new plots to one of the
color sets in palettable. Defaults to a qualitative Set1_9.
Returns:
Matplotlib plot object with properly sized fonts.
"""
ticksize = int(width * 2.5)
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
if plt is None:
import matplotlib.pyplot as plt
import importlib
mod = importlib.import_module("palettable.colorbrewer.%s" %
color_cycle[0])
colors = getattr(mod, color_cycle[1]).mpl_colors
from cycler import cycler
plt.figure(figsize=(width, height), facecolor="w", dpi=dpi)
ax = plt.gca()
ax.set_prop_cycle(cycler('color', colors))
else:
fig = plt.gcf()
fig.set_size_inches(width, height)
plt.xticks(fontsize=ticksize)
plt.yticks(fontsize=ticksize)
ax = plt.gca()
ax.set_title(ax.get_title(), size=width * 4)
labelsize = int(width * 3)
ax.set_xlabel(ax.get_xlabel(), size=labelsize)
ax.set_ylabel(ax.get_ylabel(), size=labelsize)
return plt
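# Illustrative usage sketch (not part of the original module); it assumes
# matplotlib, palettable and cycler are importable, as pretty_plot itself requires.
def _example_pretty_plot():
    plt = pretty_plot(width=6)
    plt.plot([0, 1, 2, 3], [0, 1, 4, 9], marker="o")
    plt.xlabel("x")
    plt.ylabel("x squared")
    return plt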
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None,
width=8, height=None, dpi=300):
"""
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
"""
import palettable.colorbrewer.diverging
colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
c1 = colors[0]
c2 = colors[-1]
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
import matplotlib.pyplot as plt
width = 12
labelsize = int(width * 3)
ticksize = int(width * 2.5)
    styles = ["-", "--", "-.", ":"]
fig, ax1 = plt.subplots()
fig.set_size_inches((width, height))
if dpi:
fig.set_dpi(dpi)
if isinstance(y1, dict):
for i, (k, v) in enumerate(y1.items()):
ax1.plot(x, v, c=c1, marker='s', ls=styles[i % len(styles)],
label=k)
ax1.legend(fontsize=labelsize)
else:
ax1.plot(x, y1, c=c1, marker='s', ls='-')
if xlabel:
ax1.set_xlabel(xlabel, fontsize=labelsize)
if y1label:
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
ax1.tick_params('x', labelsize=ticksize)
ax1.tick_params('y', colors=c1, labelsize=ticksize)
ax2 = ax1.twinx()
if isinstance(y2, dict):
for i, (k, v) in enumerate(y2.items()):
ax2.plot(x, v, c=c2, marker='o', ls=styles[i % len(styles)],
label=k)
ax2.legend(fontsize=labelsize)
else:
ax2.plot(x, y2, c=c2, marker='o', ls='-')
if y2label:
# Make the y-axis label, ticks and tick labels match the line color.
ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
ax2.tick_params('y', colors=c2, labelsize=ticksize)
return plt
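# Illustrative usage sketch (not part of the original module): dict-valued y1/y2
# produce one labelled series per key, sharing the same x-axis.
def _example_pretty_plot_two_axis():
    x = list(range(10))
    y1 = {"linear": [v for v in x]}
    y2 = {"quadratic": [v ** 2 for v in x]}
    return pretty_plot_two_axis(x, y1, y2, xlabel="x",
                                y1label="linear scale", y2label="quadratic scale")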
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
"""
Convenience method to plot data with trend lines based on polynomial fit.
Args:
x: Sequence of x data.
y: Sequence of y data.
deg (int): Degree of polynomial. Defaults to 1.
xlabel (str): Label for x-axis.
ylabel (str): Label for y-axis.
\\*\\*kwargs: Keyword args passed to pretty_plot.
Returns:
matplotlib.pyplot object.
"""
plt = pretty_plot(**kwargs)
pp = np.polyfit(x, y, deg)
xp = np.linspace(min(x), max(x), 200)
plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
return plt
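# Illustrative usage sketch (not part of the original module): fit and draw a
# straight trend line through slightly noisy data.
def _example_pretty_polyfit_plot():
    x = [0, 1, 2, 3, 4, 5]
    y = [0.1, 0.9, 2.1, 2.9, 4.2, 5.0]
    return pretty_polyfit_plot(x, y, deg=1, xlabel="x", ylabel="y", width=6)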
def periodic_table_heatmap(elemental_data, cbar_label="",
show_plot=False, cmap="YlOrRd", blank_color="grey",
value_format=None, max_row=9):
"""
A static method that generates a heat map overlapped on a periodic table.
Args:
elemental_data (dict): A dictionary with the element as a key and a
value assigned to it, e.g. surface energy and frequency, etc.
Elements missing in the elemental_data will be grey by default
in the final table elemental_data={"Fe": 4.2, "O": 5.0}.
cbar_label (string): Label of the colorbar. Default is "".
        show_plot (bool): Whether to show the heatmap. Default is False.
        value_format (str): Formatting string to show values. If None, no value
            is shown. Example: "%.4f" shows float to four decimals.
        cmap (string): Color scheme of the heatmap. Default is "YlOrRd".
blank_color (string): Color assigned for the missing elements in
elemental_data. Default is "grey".
max_row (integer): Maximum number of rows of the periodic table to be
shown. Default is 9, which means the periodic table heat map covers
the first 9 rows of elements.
"""
# Convert primitive_elemental data in the form of numpy array for plotting.
max_val = max(elemental_data.values())
min_val = min(elemental_data.values())
max_row = min(max_row, 9)
if max_row <= 0:
raise ValueError("The input argument 'max_row' must be positive!")
value_table = np.empty((max_row, 18)) * np.nan
blank_value = min_val - 0.01
for el in Element:
if el.row > max_row: continue
value = elemental_data.get(el.symbol, blank_value)
value_table[el.row - 1, el.group - 1] = value
# Initialize the plt object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.gcf().set_size_inches(12, 8)
# We set nan type values to masked values (ie blank spaces)
data_mask = np.ma.masked_invalid(value_table.tolist())
heatmap = ax.pcolor(data_mask, cmap=cmap, edgecolors='w', linewidths=1,
vmin=min_val-0.001, vmax=max_val+0.001)
cbar = fig.colorbar(heatmap)
# Grey out missing elements in input data
cbar.cmap.set_under(blank_color)
cbar.set_label(cbar_label, rotation=270, labelpad=15)
cbar.ax.tick_params(labelsize=14)
# Refine and make the table look nice
ax.axis('off')
ax.invert_yaxis()
# Label each block with corresponding element and value
for i, row in enumerate(value_table):
for j, el in enumerate(row):
if not np.isnan(el):
symbol = Element.from_row_and_group(i+1, j+1).symbol
plt.text(j + 0.5, i + 0.25, symbol,
horizontalalignment='center',
verticalalignment='center', fontsize=14)
if el != blank_value and value_format is not None:
plt.text(j + 0.5, i + 0.5, value_format % el,
horizontalalignment='center',
verticalalignment='center', fontsize=10)
plt.tight_layout()
if show_plot:
plt.show()
return plt
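# Illustrative usage sketch (not part of the original module): elements missing
# from the dict are drawn in blank_color, and value_format controls the printed
# numbers. The values below are arbitrary placeholders.
def _example_periodic_table_heatmap():
    data = {"Fe": 4.2, "O": 5.0, "Si": 1.3}
    return periodic_table_heatmap(data, cbar_label="Surface energy",
                                  value_format="%.1f", show_plot=False)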
def format_formula(formula):
"""
Converts str of chemical formula into
latex format for labelling purposes
Args:
formula (str): Chemical formula
"""
formatted_formula = ""
number_format = ""
for i, s in enumerate(formula):
if s.isdigit():
if not number_format:
number_format = "_{"
number_format += s
if i == len(formula) - 1:
number_format += "}"
formatted_formula += number_format
else:
if number_format:
number_format += "}"
formatted_formula += number_format
number_format = ""
formatted_formula += s
return r"$%s$" % (formatted_formula)
def van_arkel_triangle(list_of_materials, annotate=True):
"""
A static method that generates a binary van Arkel-Ketelaar triangle to
quantify the ionic, metallic and covalent character of a compound
by plotting the electronegativity difference (y) vs average (x).
See:
A.E. van Arkel, Molecules and Crystals in Inorganic Chemistry,
Interscience, New York (1956)
and
J.A.A Ketelaar, Chemical Constitution (2nd edn.), An Introduction
to the Theory of the Chemical Bond, Elsevier, New York (1958)
Args:
list_of_materials (list): A list of computed entries of binary
materials or a list of lists containing two elements (str).
        annotate (bool): Whether or not to label the points on the
triangle with reduced formula (if list of entries) or pair
of elements (if list of list of str).
"""
# F-Fr has the largest X difference. We set this
# as our top corner of the triangle (most ionic)
pt1 = np.array([(Element("F").X + Element("Fr").X) / 2,
abs(Element("F").X - Element("Fr").X)])
# Cs-Fr has the lowest average X. We set this as our
# bottom left corner of the triangle (most metallic)
pt2 = np.array([(Element("Cs").X + Element("Fr").X) / 2,
abs(Element("Cs").X - Element("Fr").X)])
# O-F has the highest average X. We set this as our
# bottom right corner of the triangle (most covalent)
pt3 = np.array([(Element("O").X + Element("F").X) / 2,
abs(Element("O").X - Element("F").X)])
# get the parameters for the lines of the triangle
d = np.array(pt1) - np.array(pt2)
slope1 = d[1] / d[0]
b1 = pt1[1] - slope1 * pt1[0]
d = pt3 - pt1
slope2 = d[1] / d[0]
b2 = pt3[1] - slope2 * pt3[0]
# Initialize the plt object
import matplotlib.pyplot as plt
# set labels and appropriate limits for plot
plt.xlim(pt2[0] - 0.45, -b2 / slope2 + 0.45)
plt.ylim(-0.45, pt1[1] + 0.45)
plt.annotate("Ionic", xy=[pt1[0] - 0.3, pt1[1] + 0.05], fontsize=20)
plt.annotate("Covalent", xy=[-b2 / slope2 - 0.65, -0.4], fontsize=20)
plt.annotate("Metallic", xy=[pt2[0] - 0.4, -0.4], fontsize=20)
plt.xlabel(r"$\frac{\chi_{A}+\chi_{B}}{2}$", fontsize=25)
plt.ylabel(r"$|\chi_{A}-\chi_{B}|$", fontsize=25)
# Set the lines of the triangle
chi_list = [el.X for el in Element]
plt.plot([min(chi_list), pt1[0]], [slope1 * min(chi_list) + b1, pt1[1]], 'k-', linewidth=3)
plt.plot([pt1[0], -b2 / slope2], [pt1[1], 0], 'k-', linewidth=3)
plt.plot([min(chi_list), -b2 / slope2], [0, 0], 'k-', linewidth=3)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
    # Shade with appropriate colors corresponding to ionic, metallic and covalent character
ax = plt.gca()
# ionic filling
ax.fill_between([min(chi_list), pt1[0]],
[slope1 * min(chi_list) + b1, pt1[1]], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
ax.fill_between([pt1[0], -b2 / slope2],
[pt1[1], slope2 * min(chi_list) - b1], facecolor=[1, 1, 0],
zorder=-5, edgecolor=[1, 1, 0])
# metal filling
XPt = Element("Pt").X
ax.fill_between([min(chi_list), (XPt + min(chi_list)) / 2],
[0, slope1 * (XPt + min(chi_list)) / 2 + b1],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
ax.fill_between([(XPt + min(chi_list)) / 2, XPt],
[slope1 * ((XPt + min(chi_list)) / 2) + b1, 0],
facecolor=[1, 0, 0], zorder=-3, alpha=0.8)
# covalent filling
ax.fill_between([(XPt + min(chi_list)) / 2, ((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2],
[0, slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
ax.fill_between([((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2, -b2 / slope2],
[slope2 * (((XPt + min(chi_list)) / 2 + -b2 / slope2) / 2) + b2, 0],
facecolor=[0, 1, 0], zorder=-4, alpha=0.8)
# Label the triangle with datapoints
for entry in list_of_materials:
if type(entry).__name__ not in ['ComputedEntry', 'ComputedStructureEntry']:
X_pair = [Element(el).X for el in entry]
formatted_formula = "%s-%s" % tuple(entry)
else:
X_pair = [Element(el).X for el in entry.composition.as_dict().keys()]
formatted_formula = format_formula(entry.composition.reduced_formula)
plt.scatter(np.mean(X_pair), abs(X_pair[0] - X_pair[1]), c='b', s=100)
if annotate:
plt.annotate(formatted_formula, fontsize=15,
xy=[np.mean(X_pair) + 0.005, abs(X_pair[0] - X_pair[1])])
plt.tight_layout()
return plt
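# Illustrative usage sketch (not part of the original module): plain element-pair
# input avoids the need for computed entries; each pair becomes one point on the
# triangle.
def _example_van_arkel_triangle():
    pairs = [["Na", "Cl"], ["Si", "O"], ["Fe", "Al"]]
    return van_arkel_triangle(pairs, annotate=True)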
def get_ax_fig_plt(ax=None, **kwargs):
"""
Helper function used in plot functions supporting an optional Axes argument.
If ax is None, we build the `matplotlib` figure and create the Axes else
we return the current active figure.
Args:
        kwargs: keyword arguments are passed to plt.figure if ax is None.
Returns:
ax: :class:`Axes` object
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure(**kwargs)
ax = fig.add_subplot(1, 1, 1)
else:
fig = plt.gcf()
return ax, fig, plt
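# Illustrative usage sketch (not part of the original module): ax=None builds a
# fresh figure, while passing an existing Axes reuses it (and the current figure).
def _example_get_ax_fig_plt():
    ax, fig, plt = get_ax_fig_plt(ax=None, figsize=(4, 3))
    ax.plot([0, 1], [0, 1])
    ax2, fig2, _ = get_ax_fig_plt(ax=ax)
    assert ax2 is ax and fig2 is fig
    return fig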
def get_ax3d_fig_plt(ax=None, **kwargs):
"""
Helper function used in plot functions supporting an optional Axes3D
argument. If ax is None, we build the `matplotlib` figure and create the
Axes3D else we return the current active figure.
Args:
        kwargs: keyword arguments are passed to plt.figure if ax is None.
Returns:
ax: :class:`Axes` object
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
if ax is None:
fig = plt.figure(**kwargs)
ax = axes3d.Axes3D(fig)
else:
fig = plt.gcf()
return ax, fig, plt
def get_axarray_fig_plt(ax_array, nrows=1, ncols=1, sharex=False, sharey=False,
squeeze=True, subplot_kw=None, gridspec_kw=None,
**fig_kw):
"""
Helper function used in plot functions that accept an optional array of Axes
as argument. If ax_array is None, we build the `matplotlib` figure and
create the array of Axes by calling plt.subplots else we return the
current active figure.
Returns:
ax: Array of :class:`Axes` objects
figure: matplotlib figure
plt: matplotlib pyplot module.
"""
import matplotlib.pyplot as plt
if ax_array is None:
fig, ax_array = plt.subplots(nrows=nrows, ncols=ncols, sharex=sharex,
sharey=sharey, squeeze=squeeze,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw, **fig_kw)
else:
fig = plt.gcf()
ax_array = np.reshape(np.array(ax_array), (nrows, ncols))
if squeeze:
if ax_array.size == 1:
ax_array = ax_array[0]
elif any(s == 1 for s in ax_array.shape):
ax_array = ax_array.ravel()
return ax_array, fig, plt
def add_fig_kwargs(func):
"""
Decorator that adds keyword arguments for functions returning matplotlib
figures.
The function should return either a matplotlib figure or None to signal
some sort of error/unexpected event.
See doc string below for the list of supported options.
"""
from functools import wraps
@wraps(func)
def wrapper(*args, **kwargs):
# pop the kwds used by the decorator.
title = kwargs.pop("title", None)
size_kwargs = kwargs.pop("size_kwargs", None)
show = kwargs.pop("show", True)
savefig = kwargs.pop("savefig", None)
tight_layout = kwargs.pop("tight_layout", False)
ax_grid = kwargs.pop("ax_grid", None)
ax_annotate = kwargs.pop("ax_annotate", None)
# Call func and return immediately if None is returned.
fig = func(*args, **kwargs)
if fig is None:
return fig
# Operate on matplotlib figure.
if title is not None:
fig.suptitle(title)
if size_kwargs is not None:
fig.set_size_inches(size_kwargs.pop("w"), size_kwargs.pop("h"),
**size_kwargs)
if ax_grid is not None:
for ax in fig.axes:
ax.grid(bool(ax_grid))
if ax_annotate:
from string import ascii_letters
tags = ascii_letters
if len(fig.axes) > len(tags):
                tags = (1 + len(fig.axes) // len(ascii_letters)) * ascii_letters
for ax, tag in zip(fig.axes, tags):
ax.annotate("(%s)" % tag, xy=(0.05, 0.95), xycoords="axes fraction")
if tight_layout:
try:
fig.tight_layout()
except Exception as exc:
# For some unknown reason, this problem shows up only on travis.
# https://stackoverflow.com/questions/22708888/valueerror-when-using-matplotlib-tight-layout
print("Ignoring Exception raised by fig.tight_layout\n", str(exc))
if savefig:
fig.savefig(savefig)
if show:
import matplotlib.pyplot as plt
plt.show()
return fig
# Add docstring to the decorated method.
s = "\n\n" + """\
Keyword arguments controlling the display of the figure:
================ ====================================================
kwargs Meaning
================ ====================================================
title Title of the plot (Default: None).
show True to show the figure (default: True).
savefig "abc.png" or "abc.eps" to save the figure to a file.
size_kwargs Dictionary with options passed to fig.set_size_inches
e.g. size_kwargs=dict(w=3, h=4)
tight_layout True to call fig.tight_layout (default: False)
ax_grid True (False) to add (remove) grid from all axes in fig.
Default: None i.e. fig is left unchanged.
ax_annotate Add labels to subplots e.g. (a), (b).
Default: False
================ ====================================================
"""
if wrapper.__doc__ is not None:
# Add s at the end of the docstring.
wrapper.__doc__ += "\n" + s
else:
# Use s
wrapper.__doc__ = s
return wrapper
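# Illustrative usage sketch (not part of the original module): the decorated
# function only has to build and return a Figure; title/show/savefig handling is
# supplied by the decorator's keyword arguments.
@add_fig_kwargs
def _example_decorated_plot(ax=None, **kwargs):
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    ax.plot([0, 1, 2], [0, 1, 4])
    return fig
# e.g. _example_decorated_plot(title="demo", show=False, savefig="demo.png")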
|
mit
|
anirudhjayaraman/scikit-learn
|
sklearn/tree/tests/test_export.py
|
130
|
9950
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
|
bsd-3-clause
|
dparks1134/STAMP
|
stamp/plugins/samples/plots/ExtendedErrorBar.py
|
1
|
19167
|
#=======================================================================
# Author: Donovan Parks
#
# Extended error bar plot.
#
# Copyright 2011 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
from PyQt4 import QtGui, QtCore
import sys
import math
import numpy as np
from mpl_toolkits.axes_grid import make_axes_locatable, Size
from stamp.plugins.samples.AbstractSamplePlotPlugin import AbstractSamplePlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.samples.plots.configGUI.extendedErrorBarUI import Ui_ExtendedErrorBarDialog
from stamp.metagenomics import TableHelper
from matplotlib.patches import Rectangle
class ExtendedErrorBar(AbstractSamplePlotPlugin):
'''
Extended error bar plot.
'''
def __init__(self, preferences, parent=None):
AbstractSamplePlotPlugin.__init__(self, preferences, parent)
self.name = 'Extended error bar'
self.type = 'Statistical'
self.settings = preferences['Settings']
self.figWidth = self.settings.value(self.name + '/width', 7.0).toDouble()[0]
self.figHeightPerRow = self.settings.value(self.name + '/row height', 0.2).toDouble()[0]
self.sortingField = self.settings.value(self.name + '/field', 'p-values').toString()
self.bShowBarPlot = self.settings.value(self.name + '/sequences subplot', True).toBool()
self.bShowPValueLabels = self.settings.value(self.name + '/p-value labels', True).toBool()
self.bShowCorrectedPvalues = self.settings.value(self.name + '/show corrected p-values', True).toBool()
self.bCustomLimits = self.settings.value(self.name + '/use custom limits', False).toBool()
self.minX = self.settings.value(self.name + '/minimum', 0.0).toDouble()[0]
self.maxX = self.settings.value(self.name + '/maximum', 1.0).toDouble()[0]
self.markerSize = self.settings.value(self.name + '/marker size', 30).toInt()[0]
self.percentageOrSeqCount = self.settings.value(self.name + '/percentage or seq count', 'Proportion (%)').toString()
self.legendPos = self.settings.value(self.name + '/legend position', -1).toInt()[0]
def mirrorProperties(self, plotToCopy):
self.name = plotToCopy.name
self.figWidth = plotToCopy.figWidth
self.figHeightPerRow = plotToCopy.figHeightPerRow
self.sortingField = plotToCopy.sortingField
self.bShowBarPlot = plotToCopy.bShowBarPlot
self.bShowPValueLabels = plotToCopy.bShowPValueLabels
self.bShowCorrectedPvalues = plotToCopy.bShowCorrectedPvalues
self.bCustomLimits = plotToCopy.bCustomLimits
self.minX = plotToCopy.minX
self.maxX = plotToCopy.maxX
self.markerSize = plotToCopy.markerSize
self.percentageOrSeqCount = plotToCopy.percentageOrSeqCount
self.legendPos = plotToCopy.legendPos
def plot(self, profile, statsResults):
# *** Check if there is sufficient data to generate the plot
if len(statsResults.activeData) <= 0:
self.emptyAxis()
return
features = statsResults.getColumn('Features')
if len(features) > 200:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
reply = QtGui.QMessageBox.question(self, 'Continue?', 'Profile contains ' + str(len(features)) + ' features. ' +
'It may take several seconds to generate this plot. We recommend filtering your profile first. ' +
'Do you wish to continue?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
QtGui.QApplication.instance().restoreOverrideCursor()
if reply == QtGui.QMessageBox.No:
self.emptyAxis()
return
# *** Colour of plot elements
axesColour = str(self.preferences['Axes colour'].name())
profile1Colour = str(self.preferences['Sample 1 colour'].name())
profile2Colour = str(self.preferences['Sample 2 colour'].name())
# *** Colour of plot elements
highlightColor = (0.9, 0.9, 0.9)
# *** Sort data
if self.sortingField == 'p-values':
statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
[statsResults.dataHeadings['pValues']], False)
elif self.sortingField == 'Effect sizes':
statsResults.activeData = TableHelper.SortTable(statsResults.activeData,\
[statsResults.dataHeadings['EffectSize']],
True, True, statsResults.confIntervMethod.bRatio)
elif self.sortingField == 'Feature labels':
statsResults.activeData = TableHelper.SortTableStrCol(statsResults.activeData,\
statsResults.dataHeadings['Features'], False)
features = statsResults.getColumn('Features') # get sorted feature labels
# *** Create lists for each quantity of interest
if statsResults.multCompCorrection.method == 'False discovery rate':
pValueTitle = 'q-value'
else:
pValueTitle = 'p-value'
if self.bShowCorrectedPvalues:
pValueLabels = statsResults.getColumnAsStr('pValuesCorrected')
if statsResults.multCompCorrection.method != 'No correction':
pValueTitle += ' (corrected)'
else:
pValueLabels = statsResults.getColumnAsStr('pValues')
effectSizes = statsResults.getColumn('EffectSize')
lowerCIs = statsResults.getColumn('LowerCI')
upperCIs = statsResults.getColumn('UpperCI')
ciTitle = ('%.3g' % (statsResults.oneMinusAlpha()*100)) + '% confidence intervals'
seqs1 = statsResults.getColumn('Seq1')
seqs2 = statsResults.getColumn('Seq2')
parentSeqs1 = statsResults.getColumn('ParentalSeq1')
parentSeqs2 = statsResults.getColumn('ParentalSeq2')
# *** Truncate feature labels
highlightedFeatures = list(self.preferences['Highlighted sample features'])
if self.preferences['Truncate feature names']:
length = self.preferences['Length of truncated feature names']
for i in xrange(0, len(features)):
if len(features[i]) > length+3:
features[i] = features[i][0:length] + '...'
for i in xrange(0, len(highlightedFeatures)):
if len(highlightedFeatures[i]) > length+3:
highlightedFeatures[i] = highlightedFeatures[i][0:length] + '...'
# *** Adjust effect size for axis scale
dominateInSample2 = []
percentage1 = []
percentage2 = []
for i in xrange(0, len(effectSizes)):
percentage1.append(float(seqs1[i])*100 / parentSeqs1[i])
percentage2.append(float(seqs2[i])*100 / parentSeqs2[i])
if statsResults.confIntervMethod.bRatio:
if effectSizes[i] < 1:
# mirror CI across y-axis
effectSizes[i] = 1.0 / effectSizes[i]
lowerCI = effectSizes[i] - (1.0 / upperCIs[i])
upperCI = (1.0 / lowerCIs[i]) - effectSizes[i]
lowerCIs[i] = lowerCI
upperCIs[i] = upperCI
dominateInSample2.append(i)
else:
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
else:
lowerCIs[i] = effectSizes[i] - lowerCIs[i]
upperCIs[i] = upperCIs[i] - effectSizes[i]
if effectSizes[i] < 0.0:
dominateInSample2.append(i)
# *** Set figure size
if self.legendPos == 3 or self.legendPos == 4 or self.legendPos == 8: # bottom legend
heightBottomLabels = 0.56 # inches
else:
heightBottomLabels = 0.4 # inches
heightTopLabels = 0.25
plotHeight = self.figHeightPerRow*len(features)
self.imageWidth = self.figWidth
self.imageHeight = plotHeight + heightBottomLabels + heightTopLabels
if self.imageWidth > 256 or self.imageHeight > 256:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.emptyAxis()
reply = QtGui.QMessageBox.question(self, 'Excessively large plot', 'The resulting plot is too large to display.')
QtGui.QApplication.instance().restoreOverrideCursor()
return
self.fig.set_size_inches(self.imageWidth, self.imageHeight)
# *** Determine width of y-axis labels
yLabelBounds = self.yLabelExtents(features, 8)
# *** Size plots which comprise the extended errorbar plot
self.fig.clear()
spacingBetweenPlots = 0.25 # inches
widthNumSeqPlot = 1.25 # inches
if self.bShowBarPlot == False:
widthNumSeqPlot = 0.0
spacingBetweenPlots = 0.0
widthPvalueLabels = 0.75 # inches
if self.bShowPValueLabels == False:
widthPvalueLabels = 0.1
yPlotOffsetFigSpace = heightBottomLabels / self.imageHeight
heightPlotFigSpace = plotHeight / self.imageHeight
xPlotOffsetFigSpace = yLabelBounds.width + 0.1 / self.imageWidth
pValueLabelWidthFigSpace = widthPvalueLabels / self.imageWidth
widthPlotFigSpace = 1.0 - pValueLabelWidthFigSpace - xPlotOffsetFigSpace
widthErrorBarPlot = widthPlotFigSpace*self.imageWidth - widthNumSeqPlot - spacingBetweenPlots
axInitAxis = self.fig.add_axes([xPlotOffsetFigSpace,yPlotOffsetFigSpace,widthPlotFigSpace,heightPlotFigSpace])
divider = make_axes_locatable(axInitAxis)
divider.get_vertical()[0] = Size.Fixed(len(features)*self.figHeightPerRow)
if self.bShowBarPlot == True:
divider.get_horizontal()[0] = Size.Fixed(widthNumSeqPlot)
axErrorbar = divider.new_horizontal(widthErrorBarPlot, pad=spacingBetweenPlots, sharey=axInitAxis)
self.fig.add_axes(axErrorbar)
else:
divider.get_horizontal()[0] = Size.Fixed(widthErrorBarPlot)
axErrorbar = axInitAxis
# *** Plot of sequences for each subsystem
if self.bShowBarPlot == True:
axNumSeq = axInitAxis
if self.percentageOrSeqCount == 'Proportion (%)':
# plot percentage
axNumSeq.barh(np.arange(len(features))+0.0, percentage1, height = 0.3, color=profile1Colour, zorder=10, ecolor='black')
axNumSeq.barh(np.arange(len(features))-0.3, percentage2, height = 0.3, color=profile2Colour, zorder=10, ecolor='black')
for value in np.arange(-0.5, len(features)-1, 2):
axNumSeq.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=1)
axNumSeq.set_xlabel(self.percentageOrSeqCount)
maxPercentage = max(max(percentage1), max(percentage2))
axNumSeq.set_xticks([0, maxPercentage])
axNumSeq.set_xlim([0, maxPercentage*1.05])
maxPercentageStr = '%.1f' % maxPercentage
axNumSeq.set_xticklabels(['0.0', maxPercentageStr])
else:
# plot sequence count
axNumSeq.barh(np.arange(len(features))+0.0, seqs1, height = 0.3, color=profile1Colour, zorder=10, ecolor='black')
axNumSeq.barh(np.arange(len(features))-0.3, seqs2, height = 0.3, color=profile2Colour, zorder=10, ecolor='black')
for value in np.arange(-0.5, len(features)-1, 2):
axNumSeq.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=1)
axNumSeq.set_xlabel(self.percentageOrSeqCount)
maxSeqs = max(max(seqs1), max(seqs2))
axNumSeq.set_xticks([0, maxSeqs])
axNumSeq.set_xlim([0, maxSeqs*1.05])
axNumSeq.set_xticklabels([0, str(maxSeqs)])
axNumSeq.set_yticks(np.arange(len(features)))
axNumSeq.set_yticklabels(features)
axNumSeq.set_ylim([-1, len(features)])
for label in axNumSeq.get_yticklabels():
if label.get_text() in highlightedFeatures:
label.set_color('red')
for a in axNumSeq.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for a in axNumSeq.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for line in axNumSeq.yaxis.get_ticklines():
line.set_color(axesColour)
for line in axNumSeq.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axNumSeq.spines.iteritems():
if loc in ['left', 'right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Plot confidence intervals for each subsystem
lastAxes = axErrorbar
markerSize = math.sqrt(float(self.markerSize))
axErrorbar.errorbar(effectSizes, np.arange(len(features)), xerr=[lowerCIs,upperCIs], fmt='o', ms=markerSize, mfc=profile1Colour, mec='black', ecolor='black', zorder=10)
effectSizesSample2 = [effectSizes[value] for value in dominateInSample2]
axErrorbar.plot(effectSizesSample2, dominateInSample2, ls='', marker='o', ms=markerSize, mfc=profile2Colour, mec='black', zorder=100)
if statsResults.confIntervMethod.bRatio:
axErrorbar.vlines(1, -1, len(features), linestyle='dashed', color=axesColour)
else:
axErrorbar.vlines(0, -1, len(features), linestyle='dashed', color=axesColour)
for value in np.arange(-0.5, len(features)-1, 2):
axErrorbar.axhspan(value, value+1, facecolor=highlightColor,edgecolor='none',zorder=1)
axErrorbar.set_title(ciTitle)
axErrorbar.set_xlabel(statsResults.confIntervMethod.plotLabel)
if self.bCustomLimits:
axErrorbar.set_xlim([self.minX, self.maxX])
else:
self.minX, self.maxX = axErrorbar.get_xlim()
if self.bShowBarPlot == False:
axErrorbar.set_yticks(np.arange(len(features)))
axErrorbar.set_yticklabels(features)
axErrorbar.set_ylim([-1, len(features)])
for label in axErrorbar.get_yticklabels():
if label.get_text() in highlightedFeatures:
label.set_color('red')
else:
for label in axErrorbar.get_yticklabels():
label.set_visible(False)
for a in axErrorbar.yaxis.majorTicks:
a.set_visible(False)
for a in axErrorbar.xaxis.majorTicks:
a.tick1On=True
a.tick2On=False
for a in axErrorbar.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for line in axErrorbar.yaxis.get_ticklines():
line.set_visible(False)
for line in axErrorbar.xaxis.get_ticklines():
line.set_color(axesColour)
for loc, spine in axErrorbar.spines.iteritems():
if loc in ['left','right','top']:
spine.set_color('none')
else:
spine.set_color(axesColour)
# *** Show p-values on right of last plot
if self.bShowPValueLabels == True:
axRight = lastAxes.twinx()
axRight.set_yticks(np.arange(len(pValueLabels)))
axRight.set_yticklabels(pValueLabels)
axRight.set_ylim([-1, len(pValueLabels)])
axRight.set_ylabel(pValueTitle)
for a in axRight.yaxis.majorTicks:
a.tick1On=False
a.tick2On=False
for loc, spine in axRight.spines.iteritems():
spine.set_color('none')
		# *** Legend
if self.legendPos != -1:
legend1 = Rectangle((0, 0), 1, 1, fc=profile1Colour)
legend2 = Rectangle((0, 0), 1, 1, fc=profile2Colour)
legend = self.fig.legend([legend1, legend2], (statsResults.profile.sampleNames[0], statsResults.profile.sampleNames[1]), loc=self.legendPos, ncol=2)
legend.get_frame().set_linewidth(0)
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
self.statsResults = statsResults
self.configDlg = ConfigureDialog(Ui_ExtendedErrorBarDialog)
# set enabled state of controls
self.configDlg.ui.cboPercentageOrSeqCount.setEnabled(self.bShowBarPlot)
self.configDlg.ui.spinMinimumX.setEnabled(self.bCustomLimits)
self.configDlg.ui.spinMaximumX.setEnabled(self.bCustomLimits)
# set current value of controls
self.configDlg.ui.cboSortingField.setCurrentIndex(self.configDlg.ui.cboSortingField.findText(self.sortingField))
self.configDlg.ui.spinFigWidth.setValue(self.figWidth)
self.configDlg.ui.spinFigRowHeight.setValue(self.figHeightPerRow)
self.configDlg.ui.chkShowBarPlot.setChecked(self.bShowBarPlot)
self.configDlg.ui.chkPValueLabels.setChecked(self.bShowPValueLabels)
self.configDlg.ui.chkCorrectedPvalues.setChecked(self.bShowCorrectedPvalues)
self.configDlg.ui.chkCustomLimits.setChecked(self.bCustomLimits)
self.configDlg.ui.spinMinimumX.setValue(self.minX)
self.configDlg.ui.spinMaximumX.setValue(self.maxX)
self.configDlg.ui.spinMarkerSize.setValue(self.markerSize)
self.configDlg.ui.cboPercentageOrSeqCount.setCurrentIndex(self.configDlg.ui.cboPercentageOrSeqCount.findText(self.percentageOrSeqCount))
if self.legendPos == 2:
self.configDlg.ui.radioLegendPosUpperLeft.setChecked(True)
elif self.legendPos == 3:
self.configDlg.ui.radioLegendPosLowerLeft.setChecked(True)
elif self.legendPos == 4:
self.configDlg.ui.radioLegendPosLowerRight.setChecked(True)
elif self.legendPos == 8:
self.configDlg.ui.radioLegendPosLowerCentre.setChecked(True)
else:
self.configDlg.ui.radioLegendPosNone.setChecked(True)
if self.configDlg.exec_() == QtGui.QDialog.Accepted:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.sortingField = str(self.configDlg.ui.cboSortingField.currentText())
self.figWidth = self.configDlg.ui.spinFigWidth.value()
self.figHeightPerRow = self.configDlg.ui.spinFigRowHeight.value()
self.bShowBarPlot = self.configDlg.ui.chkShowBarPlot.isChecked()
self.bShowPValueLabels = self.configDlg.ui.chkPValueLabels.isChecked()
self.bShowCorrectedPvalues = self.configDlg.ui.chkCorrectedPvalues.isChecked()
self.bCustomLimits = self.configDlg.ui.chkCustomLimits.isChecked()
self.minX = self.configDlg.ui.spinMinimumX.value()
self.maxX = self.configDlg.ui.spinMaximumX.value()
self.markerSize = self.configDlg.ui.spinMarkerSize.value()
self.percentageOrSeqCount = self.configDlg.ui.cboPercentageOrSeqCount.currentText()
# legend position
if self.configDlg.ui.radioLegendPosUpperLeft.isChecked() == True:
self.legendPos = 2
elif self.configDlg.ui.radioLegendPosLowerLeft.isChecked() == True:
self.legendPos = 3
elif self.configDlg.ui.radioLegendPosLowerCentre.isChecked() == True:
self.legendPos = 8
elif self.configDlg.ui.radioLegendPosLowerRight.isChecked() == True:
self.legendPos = 4
else:
self.legendPos = -1
self.settings.setValue(self.name + '/width', self.figWidth)
self.settings.setValue(self.name + '/row height', self.figHeightPerRow)
self.settings.setValue(self.name + '/field', self.sortingField)
self.settings.setValue(self.name + '/sequences subplot', self.bShowBarPlot)
self.settings.setValue(self.name + '/p-value labels', self.bShowPValueLabels)
self.settings.setValue(self.name + '/show corrected p-values', self.bShowCorrectedPvalues)
			self.settings.setValue(self.name + '/use custom limits', self.bCustomLimits)
self.settings.setValue(self.name + '/minimum', self.minX)
self.settings.setValue(self.name + '/maximum', self.maxX)
self.settings.setValue(self.name + '/marker size', self.markerSize)
self.settings.setValue(self.name + '/percentage or seq count', self.percentageOrSeqCount)
self.settings.setValue(self.name + '/legend position', self.legendPos)
self.plot(profile, statsResults)
QtGui.QApplication.instance().restoreOverrideCursor()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(ExtendedErrorBar)
testWindow.show()
sys.exit(app.exec_())
|
gpl-3.0
|
psi4/psi4
|
psi4/driver/qcdb/pytest/addons.py
|
7
|
3612
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pytest
def _plugin_import(plug):
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(plug)
else:
import pkgutil
plug_spec = pkgutil.find_loader(plug)
if plug_spec is None:
return False
else:
return True
def is_psi4_new_enough(version_feature_introduced):
if not _plugin_import('psi4'):
return False
import psi4
from pkg_resources import parse_version
return parse_version(psi4.__version__) >= parse_version(version_feature_introduced)
#def is_numpy_new_enough(version_feature_introduced):
# if not _plugin_import('numpy'):
# return False
# import numpy
# from pkg_resources import parse_version
# return parse_version(numpy.version.version) >= parse_version(version_feature_introduced)
#
#
#using_scipy = pytest.mark.skipif(_plugin_import('scipy') is False,
# reason='Not detecting module scipy. Install package if necessary and add to envvar PYTHONPATH')
using_psi4 = pytest.mark.skipif(_plugin_import('psi4') is False,
reason='Not detecting module psi4. Install package and add to envvar PYTHONPATH')
#using_psi4_libxc = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev100") is False,
# reason="Psi4 does not include DFT rewrite to use Libxc. Update to development head")
#
#using_psi4_efpmints = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev507") is False,
# reason="Psi4 does not include EFP integrals in mints. Update to development head")
#
#using_psi4_python_integral_deriv = pytest.mark.skipif(is_psi4_new_enough("1000") is False,
# reason="Psi4 does not include derivatives of integrals exported to python. Update to development head")
using_psi4_molrec = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev999") is False,
reason="Psi4 does not use the new Molecule parsing. Update to development head")
#using_numpy_113 = pytest.mark.skipif(is_numpy_new_enough("1.13.0") is False,
# reason='NumPy does not include 1.13 features. Update package and add to envvar PYTHONPATH')
#
#using_matplotlib = pytest.mark.skipif(_plugin_import('matplotlib') is False,
# reason='Note detecting module matplotlib. Install package if necessary and add to envvar PYTHONPATH')
using_pylibefp = pytest.mark.skipif(_plugin_import('pylibefp') is False,
reason='Not detecting module pylibefp. Install package if necessary and add to envvar PYTHONPATH')
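# Illustrative usage sketch (not part of the original module): placing one of the
# markers above on a pytest test function makes pytest skip it when the optional
# dependency cannot be imported. The function below is only an illustration.
@using_psi4
def _example_requires_psi4():
    import psi4  # only reached when psi4 is importable, so the mark would not skip
    return psi4.__version__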
|
lgpl-3.0
|
SCECcode/BBP
|
bbp/comps/pynga/__init__.py
|
1
|
24373
|
#!/usr/bin/env python
# This is the main module
# when you import pynga
# what it does is to do the following statements
# Note: NGA08 provides GMRotI50, while NGA14 provides RotD50, so before do the comparison, do the conversion
# GMPE Package content
import CB08
import BA08
import CY08
import AS08
import SC08
import BSSA14
import CB14
import CY14
import ASK14
# CENA GROUP1 GMPEs
import PZT11
import A0811E
import S03SCVS
# PyNGA
from utils import *
# NGA08 Period list (available for each NGA models)
# -1.0: PGA; -2.0: PGV
TsDict = {
'BA': [0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
'CB': [0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
'CY': [0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
'AS': [0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
}
# ============================================
# Integrated function for CENA GROUP 1 models
# ============================================
def CENA1(model_name, Mw, Rjb, Rrup, period):
"""
Combined function to calculate CENA1 models
"""
if model_name == "PZT11":
model = PZT11.PZT11()
dist = Rrup
elif model_name == "A0811E":
model = A0811E.A0811E()
dist = Rjb
elif model_name == "S03SCVS":
model = S03SCVS.S03SCVS()
dist = Rjb
else:
print "Invalid CENA1 model_name"
raise ValueError
# Check if period requested is supported
if period not in model.periods:
print "Period requested is invalid"
raise ValueError
# Calculate median
value = model(Mw, dist, period)
return value
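# Illustrative usage sketch (not part of the original module): the model name
# selects which distance metric is used (Rrup for PZT11, Rjb otherwise). The
# magnitude/distance values are arbitrary placeholders, and the period must be
# one of the chosen model's supported periods.
def _example_cena1():
    return CENA1("PZT11", 6.5, 20.0, 22.0, 1.0)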
# ============================================
# Integrated function for NGA 2008 models
# ============================================
def NGA08(model_name, Mw, Rjb, Vs30, period, epislon=0, NGAs=None, \
rake=None, Mech=0, Ftype=None, Fnm=None, Frv=None, \
dip=None, W=None, Ztor=None, Zhypo=None, Fas=0, \
Rrup=None, Rx=None, Fhw=None, azimuth=None, \
VsFlag=0, Z25=None, Z15=None, Z10=None, \
AS09=None, AB11=None, ArbCB=0 ):
"""
Combined function to compute median and standard deviation
Arguments (has to be specified)
----------
model_name : choose NGA model you want to use (AS,BA,CB,CY)
Mw : moment magnitude
Rjb: Joyner-Boore distance in km
defined as the shortest distance from a site to the surface projection of the rupture surface
Vs30: The average shear-wave velocity between 0 and 30-meters depth (site condition) in m/s
period: period at which you want to use NGA
This function allow to use periods that are not in the available periods (refer to TsDict)
Keywords
--------
[*] shows the default value
# ================
# General Keywords
# ================
epislon : deviation from the median value [0]
NGAs : dictionary to select terms in NGA models and use updated coefficents
default:
{'CB':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
'BA':{'NewCoefs':None,'terms':(1,1,1)},\
'CY':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
'AS':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1)}}\
# ===============
# Source Keywords
# ===============
rake: rake angle (in degree) [None]
used to determine the fault type
Mech: Used in BA model [3]
          (0:Strike-slip, 1:Normal, 2:Reverse, 3:Unknown)
Ftype: fault type string [None]
'SS': Strike-slip, 'NM': Normal, 'RV': Reverse, 'U': Unknown (unknown is only used in BA model)
Fnm : 0: not a normal fault; 1: Normal [None]
default: None
Frv : 0: not a reverse fault; 1: reverse [None]
default: None
dip : dip angle of the fault plane [None]
default: None
W : Rupture width (down-dip) [None]
Ztor : depth to the top of rupture [None]
Zhypo: depth to the hypocenter location [None]
Fas : Aftershock flag [None]
0: Mainshock; 1: Aftershock
# ================
# Path Keywords
# ================
Rrup: Rupture distance in km [None]
defined as the distance from a site the to the fault plane
For simple fault geometry, function calc_Rrup in utils.py can be used to compute Rrup, otherwise
use DistanceToEvenlyGriddedSurface function in utils.py to compute given fault geometry and site location
Rx : horizontal distance between a site and fault trace, in km [None]
defined by extending the fault trace (or the top edge of the rupture) to infinity in both directions.
For simple fault geometry, function calc_Rx in utils.py can be used to compute Rrup, otherwise,
use DistanceToEvenlyGriddedSurface function in utils.py to compute given fault geometry and site location
Fhw : hanging wall flag [None]
0: in footwall; 1: in hanging wall
azimuth: source-to-site azimuth [None]
defined as the angle between the positive fault strike direction and the line connecting
a site to the closet point on the surface projection of the top edge of rupture (clockwise)
(used in simple fault geometry)
# =================
# Site Keywords
# =================
VsFlag : Vs30 inferred or measured flag [0]
0: inferred Vs30; 1: measured Vs30
Z25: basin depth to S wave velocity equal to 2.5 km/s [None], in km
Z25 could be estimated by using calc_Z25 function in utils.py given Vs30
Z15: basin depth to S wave velocity equal to 1.5 km/s [None], in km
used to estimate Z2.5 when Z2.5 = None
Z10: basin depth to S wave velocity equal to 1.0 km/s [None], in meter
Z10 could be estimated by using calc_Z1 function in utils.py given Vs30
# =================
# Updated models
# =================
AS09 : Abrahamson and Silva 2009 updated model (taper5 hanging wall effect) [None]
AB11 : Atkinson and Boore 2011 updated model with correction term (after more small magnitude events recordings)
# =================
# Other Keywords
# =================
ArbCB: Campbell and Bozorgnia 2008 model standard deviation [0]
0: output total standard deviation is for GMRotIpp intensity measures (rotation-independent)
1: output total standard deviation is for arbitrary horizontal component
"""
if NGAs == None:
NGAs={'CB':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
'BA':{'NewCoefs':None,'terms':(1,1,1)},\
'CY':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
          'AS':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1)}}
dict1 = NGAs
itmp = 0
# check the input period
if period > 10.0 or 0<period<0.01:
print 'Positive period value should be within [0.01,10] for SA at corresponding periods'
raise ValueError
if period < 0 and period not in [-1,-2]:
print 'negative period should be -1,-2 for PGA and PGV'
raise ValueError
if model_name == 'BA':
ngaM = BA08.BA08_nga()
kwds = {'Mech':Mech,'Ftype':Ftype,'AB11':AB11, 'CoefTerms':dict1[model_name]} # OpenSHA doesn't have this
if model_name == 'CB':
ngaM = CB08.CB08_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Ztor':Ztor,'dip':dip,'Z25':Z25,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'Z15':Z15,'Arb':ArbCB,'CoefTerms':dict1[model_name]}
if model_name == 'CY':
ngaM = CY08.CY08_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Rx':Rx,'Ztor':Ztor,'dip':dip,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'AS':Fas,'VsFlag':VsFlag,'CoefTerms':dict1[model_name]}
if model_name == 'AS':
ngaM = AS08.AS08_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Rx':Rx,'Ztor':Ztor,'dip':dip,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'Fas':Fas,'VsFlag':VsFlag, 'CoefTerms':dict1[model_name]}
# Common interpolation and calculation for all models
periods = np.array(ngaM.periods)
for ip in xrange( len(periods) ):
if abs( period-periods[ip] ) < 0.0001:
# period is within the periods list
itmp = 1
break
if itmp == 1:
# compute median, std directly for the existing period in the period list of the NGA model
values = mapfunc( ngaM, Mw, Rjb, Vs30, period, rake, **kwds )
values = np.array( values )
if itmp == 0:
# do the interpolation for periods that is not in the period
# list of the NGA model
ind_low = (periods < period).nonzero()[0]
ind_high = (periods > period).nonzero()[0]
period_low = max( periods[ind_low] )
period_high = min( periods[ind_high] )
values_low = np.array( mapfunc( ngaM, Mw, Rjb, Vs30, period_low, rake, **kwds ) )
values_high = np.array( mapfunc( ngaM, Mw, Rjb, Vs30, period_high, rake, **kwds ) )
N1,N2 = np.array( values_low).shape
values = np.zeros( (N1,N2) )
for icmp in xrange( N2 ):
if icmp != 0:
# stardand values are in ln (g)
values[:,icmp] = logline( np.log(period_low), np.log(period_high), values_low[:,icmp], values_high[:,icmp], np.log(period) )
else:
# median value is in g
values[:,icmp] = logline( np.log(period_low), np.log(period_high), np.log(values_low[:,icmp]), np.log(values_high[:,icmp]), np.log(period) )
values[:,icmp] = np.exp( values[:,icmp] ) # change the median into g unit (logline gives the result in ln(g))
# outputs
NGAsigmaT = values[:,1]
NGAtau = values[:,2]
NGAsigma = values[:,3]
if epislon:
NGAmedian = np.exp( np.log(values[:,0]) + epislon * NGAsigmaT )
else:
NGAmedian = values[:,0]
# returned quantities are all in g, not in log(g), event for the standard deviations
return NGAmedian, np.exp( NGAsigmaT ), np.exp( NGAtau ), np.exp( NGAsigma ) # all in g, include the standard deviation
def BA08Test(T):
# to reproduce BA model (shown in Earthquake Spectra 2008)
    import matplotlib.pyplot as plt
NGAs={'CB':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
'BA':{'NewCoefs':None,'terms':(1,1,1)},\
'CY':{'NewCoefs':None,'terms':(1,1,1,1,1,1)},\
          'AS':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1)}}
# validation with BA
nga = 'BA'
Mws = [5,6,7,8]
Mws = [4,]
Vs30 = 760
FT = 'U'
Rjb = np.arange( 0.1, 100, 0.5 )
fig = plt.figure(1)
ax = fig.add_subplot( 111 )
lines = []
for Mw in Mws:
median, std, tau, sigma = NGA08( nga, Mw, Rjb, Vs30, T, Mech=1, NGAs=NGAs )
line = ax.loglog( Rjb, median*100 * 9.8 )
lines.append( line )
ax.legend( lines, ('M=5','M=6','M=7','M=8'), loc=0 )
ax.set_title(r"T=%s, $V_{S30}$ = 760 m/s, mech='SS'"%('%.2f'%T))
ax.set_xlabel( r'$R_{JB}$ (km)' )
ax.set_ylabel( r'5%-damped PSA (cm/s)' )
plt.show()
def NGA08test(nga):
# simple test comparing with file: ./Validation/NGAmodelsTestFiles/nga_Sa_v19a.xls
M = 6.93
Ztor = 3
Ftype = 'RV'
W = 3.85
dip = 70
Rrup = Rjb = Rx = 30
Fhw = 0
Vs30 = 760
Z10 = 0.024 * 1000 # in meter
Z25 = 2.974 # in km
VsFlag = 0
periods = TsDict[nga]
NT = len(periods)
Medians = []; SigmaTs = []
for ip in xrange( NT ):
Ti = periods[ip]
median, std, tau, sigma = NGA08( nga, M, Rjb, Vs30, Ti, Ftype=Ftype, W=W,Ztor=Ztor,dip=dip,Rrup=Rrup,Rx=Rx,Fhw=Fhw,Z10=Z10,Z25=Z25,VsFlag=VsFlag )
Medians.append( median )
SigmaTs.append( np.log(std) )
output = np.c_[ np.array( periods), np.array( Medians ), np.array( SigmaTs ) ]
pth = './tmp'
if not os.path.exists( pth ):
os.mkdir(pth)
np.savetxt( pth + '/NGA08_SimpleTest%s.txt'%nga, output )
print output
# NGA 14 period list
# -1: PGA; -2: PGV
TsDict14 = {
'BSSA': [0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
'CB': [0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1,-2],
'CY': [0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.10, 0.12, 0.15, 0.17, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 7.5, 10.0,-1],
'ASK': [0.01, 0.02, 0.03, 0.05, 0.075, 0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.75, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.5, 10.0,-1,-2],
}
# ============================================
# Integrated function for NGA 2014 models
# =============================================
def NGA14(model_name, Mw, Rjb, Vs30, period, epislon=0, NGAs=None, \
rake=None, Mech=3, Ftype=None, Fnm=None, Frv=None, \
dip=None, W=None, Ztor=None, Zhypo=None, Fas=0, \
Rrup=None, Rx=None, Fhw=None, azimuth=None, \
VsFlag=0, Z25=None, Z15=None, Z10=None, \
ArbCB=0, SJ=0, \
country='California', region='CA', \
Dregion='GlobalCATW', \
CRjb=15, Ry0=None, \
D_DPP=0 ):
if NGAs == None:
NGAs={'CB':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1,1,1)},\
'BSSA':{'NewCoefs':None,'terms':(1,1,1)},\
'CY':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1)},\
            'ASK':{'NewCoefs':None,'terms':(1,1,1,1,1,1,1)}}
dict1 = NGAs
itmp = 0
# check the input period
# Note: this function is better used at a given period with a set of other parameters (not with a set of periods)
    if period > 10.0 or 0 < period < 0.01:
        raise ValueError('Positive period value should be within [0.01,10] for SA at the corresponding periods')
    if period < 0 and period not in [-1,-2]:
        raise ValueError('Negative period should be -1 (PGA) or -2 (PGV)')
if model_name == 'BSSA':
ngaM = BSSA14.BSSA14_nga()
kwds = {'Mech':Mech,'Ftype':Ftype,'Z10':Z10,'Dregion':Dregion,'country':country,'CoefTerms':dict1[model_name]}
if model_name == 'CB':
ngaM = CB14.CB14_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Ztor':Ztor,'dip':dip,'Z25':Z25,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'Z15':Z15,'Arb':ArbCB,'SJ':SJ,'region':region,'CoefTerms':dict1[model_name]}
if model_name == 'CY':
ngaM = CY14.CY14_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Rx':Rx,'Ztor':Ztor,'dip':dip,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'AS':Fas,'VsFlag':VsFlag,'country':country,'D_DPP':D_DPP,'CoefTerms':dict1[model_name]}
        # the new CY model treats PGA as SA(0.01)
if period == -1:
period = 0.01
if model_name == 'ASK':
ngaM = ASK14.ASK14_nga()
kwds = {'Ftype':Ftype,'Rrup':Rrup,'Rx':Rx,'Ztor':Ztor,'dip':dip,'W':W,'Zhypo':Zhypo,'azimuth':azimuth,'Fhw':Fhw,'Z10':Z10,'Fas':Fas,'CRjb':CRjb,'Ry0':Ry0,'region':region,'country':country,'VsFlag':VsFlag, 'CoefTerms':dict1[model_name]}
    # common interpolation for all models
periods = np.array(ngaM.periods)
for ip in xrange( len(periods) ):
if abs( period-periods[ip] ) < 0.0001:
# period is within the periods list
itmp = 1
break
if itmp == 1:
# compute median, std directly for the existing period in the period list of the NGA model
values = mapfunc( ngaM, Mw, Rjb, Vs30, period, rake, **kwds )
values = np.array( values )
if itmp == 0:
        # interpolate for periods that are not in the NGA model's period list
ind_low = (periods <= period*1.0).nonzero()[0]
ind_high = (periods >= period*1.0).nonzero()[0]
period_low = max( periods[ind_low] )
period_high = min( periods[ind_high] )
values_low = np.array( mapfunc( ngaM, Mw, Rjb, Vs30, period_low, rake, **kwds ) )
values_high = np.array( mapfunc( ngaM, Mw, Rjb, Vs30, period_high, rake, **kwds ) )
N1,N2 = np.array(values_low).shape
values = np.zeros( (N1,N2) )
for icmp in xrange( N2 ):
if icmp != 0:
                # standard deviation values are already in ln(g), interpolate directly
values[:,icmp] = logline( np.log(period_low), np.log(period_high), values_low[:,icmp], values_high[:,icmp], np.log(period) )
else:
# median value is in g
values[:,icmp] = logline( np.log(period_low), np.log(period_high), np.log(values_low[:,icmp]), np.log(values_high[:,icmp]), np.log(period) )
                values[:,icmp] = np.exp( values[:,icmp] ) # convert the median back to g (logline returns ln(g))
# outputs
NGAsigmaT = values[:,1]
NGAtau = values[:,2]
NGAsigma = values[:,3]
if epislon:
NGAmedian = np.exp( np.log(values[:,0]) + epislon * NGAsigmaT )
else:
NGAmedian = values[:,0]
    # returned quantities are all in g, not in ln(g), even for the standard deviations
    return NGAmedian, np.exp( NGAsigmaT ), np.exp( NGAtau ), np.exp( NGAsigma ) # all in g, including the standard deviations
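# Minimal usage sketch of the NGA14 wrapper above. The scenario values below
# are purely illustrative (not taken from any validation file), and the call
# mirrors the keyword usage in NGA_Test()/BSSA14_test() further down.
def _nga14_usage_sketch():
    median, sigmaT, tau, sigma = NGA14('BSSA', 6.5, 20.0, 760., 1.0,
                                       Mech=1, rake=0., dip=90, Ztor=3, W=10,
                                       Rrup=20., Fhw=0, VsFlag=0)
    return median, sigmaT, tau, sigma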
def BSSA14_validation(infile, outfile, iset):
# read in files (mainly parameters for run using pynga)
hdrs = open(infile,'r').readlines()[3].strip().split()
inputs = {}
data = np.loadtxt(infile,skiprows=4)
for ih in xrange(len(hdrs)):
hdr = hdrs[ih]
inputs[hdr] = data[:,ih]
regionDict = {'0':'GlobalCATW','1':'GlobalCATW','2':'ChinaTurkey','3':'ItalyJapan'}
# calculate and save to file (or plot directly) (following the same format)
#fid = open(outfile,'w')
BSSAnga = BSSA14.BSSA14_nga()
BAnga = BA08.BA08_nga()
Nl = len(inputs['T'])
Y = []; sig_lnY = []; tau = []; sigma = []
Y1 = []; sig_lnY1 = []; tau1 = []; sigma1 = []
Nls1 = []; Nls2 = []; Nls3 = []
rake = None
for il in xrange(Nl):
for key in ['T','M','Rjb','V30', 'mech', 'iregion', 'z1']:
cmd = "%s = inputs['%s'][%d]"%(key,key,il)
exec(cmd)
Dregion = regionDict[str(int(iregion))]
if z1 == -1.0: Z10 = None
if z1 != -1.0: Z10 = z1
kwds14 = {'Mech':int(mech),'Dregion':Dregion,'Z10':Z10}
kwds08 = {'Mech':int(mech)}
if T == -1.0: T = -2
if T == 0.0: T = -1
if T not in TsDict14['BSSA']:
pass
else:
Nls1.append(il)
Y0, sT, tau0, sigma0 = BSSAnga(M,Rjb,V30,T,rake,**kwds14)
Y.append(Y0)
sig_lnY.append(sT)
tau.append(tau0)
sigma.append(sigma0)
if T not in TsDict['BA']:
pass
else:
Nls3.append(il)
Y0, sT, tau0, sigma0 = BAnga(M,Rjb,V30,T,rake,**kwds08)
Y1.append(Y0)
sig_lnY1.append(sT)
tau1.append(tau0)
sigma1.append(sigma0)
Nls2.append(il)
pyNGAs = [Y,sig_lnY,tau,sigma]
pyNGAs08 = [Y1,sig_lnY1,tau1,sigma1]
pyNGAs = np.array(pyNGAs)
pyNGAs08 = np.array(pyNGAs08)
ftNGAs = np.array([inputs['Y(g)'],inputs['sigma'],inputs['tau'],inputs['phi']])
# plot
fig = plt.figure(1)
texts = ['IM',r'$\sigma_T$',r'$\tau$',r'$\sigma$']
for iax in xrange( len(pyNGAs) ):
ax = fig.add_subplot(2,2,iax+1)
ax.plot(Nls1,pyNGAs[iax],'bx', label='pyNGA14')
ax.plot(Nls3,pyNGAs08[iax],'r+', label='pyNGA08')
ax.plot(Nls2,ftNGAs[iax],'k.', label='orgNGA14')
ax.set_xlabel('points')
ax.set_ylabel('values')
ax.legend(loc=0)
ax.text(0.9,0.9,texts[iax],transform=ax.transAxes)
pltpth = './NGA_west2/validation/BSSA14/outputs'
pltnam = pltpth + '/validation_BSSA14_set%s.png'%iset
fig.savefig(pltnam)
plt.show()
def NGA_Test():
# common set to test and compare
M = 6.93
Ztor = 3
Ftype = 'RV'
Mech=3
W = 3.85
rake = 90
dip = 70
Rrup = Rjb = Rx = 30
Fhw = 0
Vs30 = 760
Vs30 = 128.
Z10 = 0.024 * 1000 # in meter
Z25 = 2.974 # in km
VsFlag = 0
# for NGA 08
for nga in ['BA','CB','CY','AS']:
periods = TsDict[nga]
NT = len(periods)
Medians = []; SigmaTs = []
for ip in xrange( NT ):
Ti = periods[ip]
median, std, tau, sigma = NGA08( nga, M, Rjb, Vs30, Ti, Ftype=Ftype, W=W,Ztor=Ztor,dip=dip,Rrup=Rrup,Rx=Rx,Fhw=Fhw,Z10=Z10,Z25=Z25,VsFlag=VsFlag )
Medians.append( median )
SigmaTs.append( np.log(std) )
output = np.c_[ np.array( periods), np.array( Medians ), np.array( SigmaTs ) ]
pth = './tmp'
if not os.path.exists( pth ):
os.mkdir(pth)
np.savetxt( pth + '/NGA08_SimpleTest%s.txt'%nga, output )
# NGA 14
for nga in ['BSSA','CB','CY','ASK']:
periods = TsDict14[nga]
NT = len(periods)
Medians = []; SigmaTs = []
for ip in xrange( NT ):
Ti = periods[ip]
median, std, tau, sigma = NGA14( nga, M, Rjb, Vs30, Ti, Ftype=Ftype, Mech=Mech, rake=rake, W=W,Ztor=Ztor,dip=dip,Rrup=Rrup,Rx=Rx,Fhw=Fhw,Z10=Z10,Z25=Z25,VsFlag=VsFlag )
Medians.append( median )
SigmaTs.append( np.log(std) )
output = np.c_[ np.array( periods), np.array( Medians ), np.array( SigmaTs ) ]
pth = './tmp'
if not os.path.exists( pth ):
os.mkdir(pth)
np.savetxt( pth + '/NGA14_SimpleTest%s.txt'%nga, output )
#print output
def PlotTest():
# Debug the period for CY
pth = './tmp'
nga1 = ['BA','CB','CY','AS']
nga2 = ['BSSA','CB','CY','ASK']
fig = plt.figure(1)
for i in xrange(4):
ax = fig.add_subplot(2,2,i+1)
inputs = np.loadtxt(pth+'/NGA08_SimpleTest%s.txt'%nga1[i])
        inputs1 = np.loadtxt(pth+'/NGA14_SimpleTest%s.txt'%nga2[i])
Ts = inputs[:-1,0]
values = inputs[:-1,1]
ax.semilogx(Ts, values,'b+',label='%s08'%nga1[i])
Ts = inputs1[:-1,0]
values = inputs1[:-1,1]
ax.semilogx(Ts,values,'rx', label='%s14'%nga2[i])
ax.legend(loc=0)
        ax.set_xlabel('period (s)')
ax.set_ylabel('SA (g)')
fig.savefig(pth+'/ComparisonsNGA08_NGA14.png')
def BSSA14_test(Ti):
    # quick spot check: direct BSSA14 call vs. the NGA14 wrapper at period Ti
Rjb = Rrup=20.
Vs30 = 760.
Mw = 6
rake = 0.
Ftype='SS'
Mech = 1
CoefTerms={'terms':(1,1,1),'NewCoefs':None}
kwds = {'Mech':Mech,'Ftype':Ftype, 'Z10':None, 'Dregion':'GlobalCATW', 'country':'California', 'CoefTerms':CoefTerms}
BSSAnga = BSSA14.BSSA14_nga() # BA08nga instance
# debug mode (show each term)
IM, sigmaT, tau, sigma = BSSAnga(Mw,Rjb,Vs30,Ti,rake, **kwds)
print Ti, 'BSSA14:', IM, sigmaT, tau, sigma
VsFlag = 0
Z10=Z25=None
dip = 90; Ztor = 3; W = 10; Fhw = 0
median, std, tau, sigma = NGA14( 'BSSA', Mw, Rjb, Vs30, Ti, Ftype=Ftype, Mech=Mech, rake=rake, W=W,Ztor=Ztor,dip=dip,Rrup=Rrup,Fhw=Fhw,Z10=Z10,Z25=Z25,VsFlag=VsFlag )
print Ti, 'NGA14_BSSA:',median, std, tau, sigma
# ====================
# self_application
# ====================
if __name__ == '__main__':
import sys
opt = sys.argv[1]
if opt == 'BA08':
BA08Test(0.3)
if opt == 'NGA08':
nga = sys.argv[2] # choose one NGA model in NGA08
NGA08test(nga)
if opt == 'BSSA14':
# validation of code with BSSA outputs
opt1 = sys.argv[3] # 1, 2, 3 to choose reference files
wrkpth = r'H:\local\pylib\pynga\NGA_west2\validation\BSSA14'
inpth = wrkpth + r'\inputs'
outpth = wrkpth + r'\outputs'
if opt1 == '1':
# set 1:
file0 = r'\bssa14_vs_period_r_20_v30_760_mech_1.out'
if opt1 == '2':
# set 2:
file0 = r'\bssa14_vs_period_r_20_v30_200_mech_1.out' # (period, magnitude, distance)
if opt1 == '3':
# set 3:
file0 = r'\bssa14_vs_rjb_m_4_5_6_7_8_8.5.vs30_760_mech_1.out' # (period, magnitude)
infile = inpth + file0
outfile = outpth + file0
BSSA14_validation(infile, outfile, int(opt1))
if opt == 'NGAComparison':
NGA_Test()
if opt == 'PlotTest':
PlotTest()
if opt == 'BSSA':
BSSA14_test(0.5)
BSSA14_test(0.75)
|
apache-2.0
|
pradyu1993/scikit-learn
|
sklearn/neighbors/tests/test_neighbors.py
|
1
|
20954
|
import warnings
from nose.tools import assert_equal, assert_true
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_raises
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from scipy.spatial import cKDTree
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
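# Minkowski exponents exercised by the tests below:
# p=1 is the Manhattan distance, p=2 Euclidean, p=np.inf Chebyshev.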
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_warn_on_equidistant(n_samples=100, n_features=3, k=3):
"""test the production of a warning if equidistant points are discarded"""
X = rng.random_sample(size=(n_samples, n_features))
q = rng.random_sample(size=n_features)
neigh = neighbors.NearestNeighbors(n_neighbors=k)
neigh.fit(X[:-1])
ind = neigh.kneighbors(q, return_distance=False)
    # make the last point identical to the furthest neighbor;
    # querying this point should trigger the equidistant-neighbors warning
X[-1] = X[ind[0, k - 1]]
y = np.zeros(X.shape[0])
expected_message = ("kneighbors: neighbor k+1 and neighbor k "
"have the same distance: results will be "
"dependent on data order.")
algorithms = ('ball_tree', 'brute')
estimators = (neighbors.KNeighborsClassifier,
neighbors.KNeighborsRegressor)
for algorithm in algorithms:
for estimator in estimators:
with warnings.catch_warnings(record=True) as warn_queue:
neigh = estimator(n_neighbors=k, algorithm=algorithm)
neigh.fit(X, y)
neigh.predict(q)
assert_equal(len(warn_queue), 1)
assert_equal(str(warn_queue[0].message), expected_message)
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5,
random_state=0):
"""Test unsupervised neighbors methods"""
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
"""test the types of valid input into NearestNeighbors"""
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), cKDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
"""Test unsupervised radius-based query"""
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-neighbors classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-neighbors classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
"""Test KNeighborsClassifier.predict_proba() method"""
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
""" Test radius-based classifier when no neighbors found.
    In this case it should raise an informative exception. """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
clf.predict(z1)
assert_raises(ValueError, clf.predict, z2)
def test_radius_neighbors_classifier_outlier_labeling():
""" Test radius-based classifier when no neighbors found and outliers
are labeled. """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
""" Test radius-based classifier, when distance to a sample is zero. """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-NN classifier on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
"""Test k-neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test radius-based regression on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
"""Sanity checks on the iris dataset
Puts three points of each label in the plane and performs a
nearest neighbor query on points near the decision boundary.
"""
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm, warn_on_equidistant=False)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm,
warn_on_equidistant=False)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
"""Sanity check on the digits dataset
the 'brute' algorithm has been observed to fail if the input
dtype is uint8 due to overflow in distance calculations.
"""
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute',
warn_on_equidistant=False)
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
"""Test kneighbors_graph to build the k-Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.todense(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.todense(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.todense(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.todense(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.todense(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_radius_neighbors_graph():
"""Test radius_neighbors_graph to build the Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.todense(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.todense(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_neighbors_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
nbrs = cls()
assert_raises(ValueError,
nbrs.predict,
X)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
z01nl1o02/tests
|
imagemd5/imagemd5_interset.py
|
1
|
1178
|
import os,sys,pdb,cPickle
import pandas as pd
import argparse
import numpy as np
def run(firstFile, secondFile, flag_scan_first,outcsv):
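    # The two pkl files are assumed to hold {md5: [file names]} dictionaries;
    # intersect their keys and dump the file names from the chosen side.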
with open(firstFile, 'rb') as f:
dictA = cPickle.load(f)
with open(secondFile, 'rb') as f:
dictB = cPickle.load(f)
keyA = set(dictA.keys())
keyB = set(dictB.keys())
keys = list(keyA & keyB)
    print 'intersection total %d'%len(keys)
if flag_scan_first == 1:
dictC = dictA
else:
dictC = dictB
line_list = []
for key in keys:
files = dictC[key]
line_list.extend(files)
line_list = set(list(line_list))
line = '\r\n'.join(line_list)
with open(outcsv,'wb') as f:
        f.write(line)
if __name__=="__main__":
ap = argparse.ArgumentParser()
ap.add_argument('firstpkl',help='first pkl')
ap.add_argument('secondpkl',help='second pkl')
    ap.add_argument('flag_use_first',type=np.int64,help='1: list file names of the intersection taken from the first pkl; 0: take them from the second pkl')
ap.add_argument('outcsv',help='output csv')
args = ap.parse_args()
run(args.firstpkl, args.secondpkl, args.flag_use_first, args.outcsv)
|
gpl-2.0
|
kingfink/Newport-Folk-Festival
|
wikipedia_origins.py
|
1
|
1064
|
import urllib
import urllib2
from bs4 import BeautifulSoup
import pandas as pd
def get_location(article):
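    # Scrape the artist's Wikipedia infobox and return a single string of the
    # form "<birth place> | <origin>" ("Artist not Found" if the page fails).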
article = urllib.quote(article)
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')] #wikipedia needs this
str_ret = ''
try:
resource = opener.open("http://en.wikipedia.org/wiki/" + article)
data = resource.read()
resource.close()
soup = BeautifulSoup(data)
# get birth
try:
str_ret = soup.find("th", text="Born").parent.find("a").get_text()
except:
str_ret = "Not Available"
# get origin
try:
str_ret = str_ret + ' | ' + soup.find("th", text="Origin").parent.find("td").get_text()
except:
str_ret = str_ret + ' | ' + "Not Available"
except:
str_ret = "Artist not Found"
return str_ret
if __name__ == "__main__":
df = pd.read_csv('artists.csv')
for x in df.T.iteritems():
print x[1][0] + ' | ' + get_location(x[1][0])
|
mit
|
nmayorov/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
90
|
5742
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
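    # Lee & Seung multiplicative updates:
    #   H <- H * (W^T V) / (W^T W H)
    #   W <- W * (V H^T) / (W H H^T)
    # eps in the denominators guards against division by zero.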
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization '
                         'benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
            # dummy point plot to stick the legend to since surface plots do
            # not support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
|
bsd-3-clause
|
jskDr/jamespy_py3
|
kkegg.py
|
1
|
1536
|
# KEGG related file
import json
import pandas as pd
def get_lr_string( KEGG_ID_list, sep_str):
"""
    Split each KEGG_ID string into left and right parts using the given separator string.
"""
kegg_id_l, kegg_id_r = [], []
for kegg_id in KEGG_ID_list:
kl, kr = kegg_id.split(sep_str)
kegg_id_l.append( kl)
kegg_id_r.append( kr)
return kegg_id_l, kegg_id_r
def get_lr_kegg_id( KEGG_ID_list):
return get_lr_string( KEGG_ID_list, " = ")
def get_lr_kegg_lr( kegg_id_l):
return get_lr_string( kegg_id_l, " + ")
def get_l0r0_kegg( KEGG_ID_list):
"""
Return the first id of each part in KEGG_ID.
"""
l, r = get_lr_kegg_id( KEGG_ID_list)
l0 = get_lr_kegg_lr( l)[0]
r0 = get_lr_kegg_lr( r)[0]
return l0, r0
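# Illustrative example of the helpers above, using a hypothetical KEGG
# reaction string (the compound IDs are placeholders, not real data):
def _kegg_split_example():
    left, right = get_l0r0_kegg(["C00001 + C00002 = C00003 + C00004"])
    # left == ["C00001"], right == ["C00003"]
    return left, right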
def get_lr_smiles( kegg_d, KEGG_ID_list):
"""
    Translate the leading left/right KEGG compound IDs into SMILES strings via the kegg_d lookup.
"""
l0_l, r0_l = get_l0r0_kegg( KEGG_ID_list)
ls = [ kegg_d[l] for l in l0_l]
rs = [ kegg_d[r] for r in r0_l]
return ls, rs
class KEGG_ID_to_SMILES:
"""
Translate KEGG_ID to Left and Right SMILES Strings.
"""
def __init__(self, kegg_d_fname = 'sheet/Kegg_Dict.json'):
"""
Usage
-----
ls, rs = KEGG_ID_to_SMILES().transform( df["KEGG_ID"].tolist())
"""
with open( kegg_d_fname) as data_file:
self.kegg_d = json.load(data_file)
def transform( self, KEGG_ID_list):
kegg_d = self.kegg_d
return get_lr_smiles( kegg_d, KEGG_ID_list)
|
mit
|
JosmanPS/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
|
221
|
5517
|
"""
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
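    # ESLII writes the binomial deviance for labels in {-1, +1} as
    #   L(y, f) = log(1 + exp(-2 * y * f));
    # here y is in {0, 1}, so (2 * y - 1) maps it back to {-1, +1}.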
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula from ESLII
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
        sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
|
bsd-3-clause
|
jakereps/qiime2
|
qiime2/metadata/metadata.py
|
1
|
39745
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import abc
import collections
import itertools
import sqlite3
import types
import warnings
import pandas as pd
import numpy as np
import qiime2
from qiime2.core.util import find_duplicates
from .base import SUPPORTED_COLUMN_TYPES, FORMATTED_ID_HEADERS, is_id_header
class _MetadataBase:
"""Base class for functionality shared between Metadata and MetadataColumn.
Parameters
----------
index : pandas.Index
IDs associated with the metadata.
"""
@property
def id_header(self):
"""Name identifying the IDs associated with the metadata.
This property is read-only.
Returns
-------
str
Name of IDs associated with the metadata.
"""
return self._id_header
@property
def ids(self):
"""IDs associated with the metadata.
This property is read-only.
Returns
-------
tuple of str
Metadata IDs.
"""
return self._ids
@property
def id_count(self):
"""Number of metadata IDs.
This property is read-only.
Returns
-------
int
Number of metadata IDs.
"""
return len(self._ids)
@property
def artifacts(self):
"""Artifacts that are the source of the metadata.
This property is read-only.
Returns
-------
tuple of qiime2.Artifact
Source artifacts of the metadata.
"""
return tuple(self._artifacts)
def __init__(self, index):
if index.empty:
raise ValueError(
"%s must contain at least one ID." % self.__class__.__name__)
id_header = index.name
self._assert_valid_id_header(id_header)
self._id_header = id_header
self._validate_index(index, axis='id')
self._ids = tuple(index)
self._artifacts = []
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self._id_header == other._id_header and
self._artifacts == other._artifacts
)
def __ne__(self, other):
return not (self == other)
def _add_artifacts(self, artifacts):
deduped = set(self._artifacts)
for artifact in artifacts:
if not isinstance(artifact, qiime2.Artifact):
raise TypeError(
"Expected Artifact object, received %r" % artifact)
if artifact in deduped:
raise ValueError(
"Duplicate source artifacts are not supported on %s "
"objects. The following artifact is a duplicate of "
"another source artifact: %r" %
(self.__class__.__name__, artifact))
deduped.add(artifact)
self._artifacts.extend(artifacts)
# Static helpers below for code reuse in Metadata and MetadataColumn
@classmethod
def _assert_valid_id_header(cls, name):
if not is_id_header(name):
raise ValueError(
"pandas index name (`Index.name`) must be one of the "
"following values, not %r:\n\n%s" %
(name, FORMATTED_ID_HEADERS))
@classmethod
def _validate_index(cls, index, *, axis):
if axis == 'id':
label = 'ID'
elif axis == 'column':
label = 'column name'
else:
raise NotImplementedError
for value in index:
if not isinstance(value, str):
raise TypeError(
"Detected non-string metadata %s of type %r: %r" %
(label, type(value), value))
if not value:
raise ValueError(
"Detected empty metadata %s. %ss must consist of at least "
"one character." % (label, label))
if axis == 'id' and value.startswith('#'):
raise ValueError(
"Detected metadata %s that begins with a pound sign "
"(#): %r" % (label, value))
if is_id_header(value):
raise ValueError(
"Detected metadata %s %r that conflicts with a name "
"reserved for the ID header. Reserved ID headers:\n\n%s" %
(label, value, FORMATTED_ID_HEADERS))
if len(index) != len(set(index)):
duplicates = find_duplicates(index)
raise ValueError(
"Metadata %ss must be unique. The following %ss are "
"duplicated: %s" %
(label, label, ', '.join(repr(e) for e in sorted(duplicates))))
@classmethod
def _filter_ids_helper(cls, df_or_series, ids, ids_to_keep):
# `ids_to_keep` can be any iterable, so turn it into a list so that it
# can be iterated over multiple times below (and length-checked).
ids_to_keep = list(ids_to_keep)
if len(ids_to_keep) == 0:
raise ValueError("`ids_to_keep` must contain at least one ID.")
duplicates = find_duplicates(ids_to_keep)
if duplicates:
raise ValueError(
"`ids_to_keep` must contain unique IDs. The following IDs are "
"duplicated: %s" %
(', '.join(repr(e) for e in sorted(duplicates))))
ids_to_keep = set(ids_to_keep)
missing_ids = ids_to_keep - ids
if missing_ids:
raise ValueError(
"The following IDs are not present in the metadata: %s"
% (', '.join(repr(e) for e in sorted(missing_ids))))
# While preserving order, get rid of any IDs not contained in
# `ids_to_keep`.
ids_to_discard = ids - ids_to_keep
return df_or_series.drop(labels=ids_to_discard, axis='index',
inplace=False, errors='raise')
# Other properties such as units can be included here in the future!
ColumnProperties = collections.namedtuple('ColumnProperties', ['type'])
class Metadata(_MetadataBase):
"""Store metadata associated with identifiers in a study.
Metadata is tabular in nature, mapping study identifiers (e.g. sample or
feature IDs) to columns of metadata associated with each ID.
For more details about metadata in QIIME 2, including the TSV metadata file
format, see the Metadata Tutorial at https://docs.qiime2.org.
The following text focuses on design and considerations when working with
``Metadata`` objects at the API level.
A ``Metadata`` object is composed of zero or more ``MetadataColumn``
objects. A ``Metadata`` object always contains at least one ID, regardless
of the number of columns. Each column in the ``Metadata`` object has an
associated column type representing either *categorical* or *numeric*
data. Each metadata column is represented by an object corresponding to the
column's type: ``CategoricalMetadataColumn`` or ``NumericMetadataColumn``,
respectively.
A ``Metadata`` object is closely linked to its corresponding TSV metadata
file format described at https://docs.qiime2.org. Therefore, certain
requirements present in the file format are also enforced on the in-memory
object in order to make serialized ``Metadata`` objects roundtrippable when
loaded from disk again. For example, IDs cannot begin with a pound
character (``#``) because those IDs would be interpreted as comment rows
when written to disk as TSV. See the metadata file format spec for more
details about data formatting requirements.
In addition to being loaded from or saved to disk, a ``Metadata`` object
can be constructed from a ``pandas.DataFrame`` object. See the *Parameters*
section below for details on how to construct ``Metadata`` objects from
dataframes.
``Metadata`` objects have various methods to access, filter, and merge
data. A dataframe can be retrieved from the ``Metadata`` object for further
data manipulation using the pandas API. Individual ``MetadataColumn``
objects can be retrieved to gain access to APIs applicable to a single
metadata column.
Parameters
----------
dataframe : pandas.DataFrame
Dataframe containing metadata. The dataframe's index defines the IDs,
and the index name (``Index.name``) must match one of the required ID
headers described in the metadata file format spec. Each column in the
dataframe defines a metadata column, and the metadata column's type
(i.e. *categorical* or *numeric*) is determined based on the column's
dtype. If a column has ``dtype=object``, it may contain strings or
pandas missing values (e.g. ``np.nan``, ``None``). Columns matching
this requirement are assumed to be *categorical*. If a column in the
dataframe has ``dtype=float`` or ``dtype=int``, it may contain floating
point numbers or integers, as well as pandas missing values
(e.g. ``np.nan``). Columns matching this requirement are assumed to be
*numeric*. Regardless of column type (categorical vs numeric), the
dataframe stored within the ``Metadata`` object will have any missing
values normalized to ``np.nan``. Columns with ``dtype=int`` will be
cast to ``dtype=float``. To obtain a dataframe from the ``Metadata``
object containing these normalized data types and values, use
``Metadata.to_dataframe()``.
"""
@classmethod
def load(cls, filepath, column_types=None):
"""Load a TSV metadata file.
The TSV metadata file format is described at https://docs.qiime2.org in
the Metadata Tutorial.
Parameters
----------
filepath : str
Path to TSV metadata file to be loaded.
column_types : dict, optional
Override metadata column types specified or inferred in the file.
This is a dict mapping column names (str) to column types (str).
Valid column types are 'categorical' and 'numeric'. Column names
may be omitted from this dict to use the column types read from the
file.
Returns
-------
Metadata
Metadata object loaded from `filepath`.
Raises
------
MetadataFileError
If the metadata file is invalid in any way (e.g. doesn't meet the
file format's requirements).
See Also
--------
save
"""
from .io import MetadataReader
return MetadataReader(filepath).read(into=cls,
column_types=column_types)
@property
def columns(self):
"""Ordered mapping of column names to ColumnProperties.
The mapping that is returned is read-only. This property is also
read-only.
Returns
-------
types.MappingProxyType
Ordered mapping of column names to ColumnProperties.
"""
# Read-only proxy to the OrderedDict mapping column names to
# ColumnProperties.
return types.MappingProxyType(self._columns)
@property
def column_count(self):
"""Number of metadata columns.
This property is read-only.
Returns
-------
int
Number of metadata columns.
Notes
-----
Zero metadata columns are allowed.
See Also
--------
id_count
"""
return len(self._columns)
def __init__(self, dataframe):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError(
"%s constructor requires a pandas.DataFrame object, not "
"%r" % (self.__class__.__name__, type(dataframe)))
super().__init__(dataframe.index)
self._dataframe, self._columns = self._normalize_dataframe(dataframe)
self._validate_index(self._dataframe.columns, axis='column')
def _normalize_dataframe(self, dataframe):
norm_df = dataframe.copy()
# Do not attempt to strip empty metadata
if not norm_df.columns.empty:
norm_df.columns = norm_df.columns.str.strip()
norm_df.index = norm_df.index.str.strip()
columns = collections.OrderedDict()
for column_name, series in norm_df.items():
metadata_column = self._metadata_column_factory(series)
norm_df[column_name] = metadata_column.to_series()
properties = ColumnProperties(type=metadata_column.type)
columns[column_name] = properties
return norm_df, columns
def _metadata_column_factory(self, series):
dtype = series.dtype
if NumericMetadataColumn._is_supported_dtype(dtype):
column = NumericMetadataColumn(series)
elif CategoricalMetadataColumn._is_supported_dtype(dtype):
column = CategoricalMetadataColumn(series)
else:
raise TypeError(
"Metadata column %r has an unsupported pandas dtype of %s. "
"Supported dtypes: float, int, object" %
(series.name, dtype))
column._add_artifacts(self.artifacts)
return column
def __repr__(self):
"""String summary of the metadata and its columns."""
lines = []
# Header
lines.append(self.__class__.__name__)
lines.append('-' * len(self.__class__.__name__))
# Dimensions
lines.append('%d ID%s x %d column%s' % (
self.id_count,
'' if self.id_count == 1 else 's',
self.column_count,
'' if self.column_count == 1 else 's',
))
# Column properties
if self.column_count != 0:
max_name_len = max((len(name) for name in self.columns))
for name, props in self.columns.items():
padding = ' ' * ((max_name_len - len(name)) + 1)
lines.append('%s:%s%r' % (name, padding, props))
# Epilogue
lines.append('')
lines.append('Call to_dataframe() for a tabular representation.')
return '\n'.join(lines)
def __eq__(self, other):
"""Determine if this metadata is equal to another.
``Metadata`` objects are equal if their IDs, columns (including column
names, types, and ordering), ID headers, source artifacts, and metadata
values are equal.
Parameters
----------
other : Metadata
Metadata to test for equality.
Returns
-------
bool
Indicates whether this ``Metadata`` object is equal to `other`.
See Also
--------
__ne__
"""
return (
super().__eq__(other) and
self._columns == other._columns and
self._dataframe.equals(other._dataframe)
)
def __ne__(self, other):
"""Determine if this metadata is not equal to another.
``Metadata`` objects are not equal if their IDs, columns (including
column names, types, or ordering), ID headers, source artifacts, or
metadata values are not equal.
Parameters
----------
other : Metadata
Metadata to test for inequality.
Returns
-------
bool
Indicates whether this ``Metadata`` object is not equal to `other`.
See Also
--------
__eq__
"""
return not (self == other)
def save(self, filepath):
"""Save a TSV metadata file.
The TSV metadata file format is described at https://docs.qiime2.org in
the Metadata Tutorial.
The file will always include the ``#q2:types`` directive in order to
make the file roundtrippable without relying on column type inference.
Parameters
----------
filepath : str
Path to save TSV metadata file at.
See Also
--------
load
"""
from .io import MetadataWriter
MetadataWriter(self).write(filepath)
def to_dataframe(self):
"""Create a pandas dataframe from the metadata.
The dataframe's index name (``Index.name``) will match this metadata
object's ``id_header``, and the index will contain this metadata
object's IDs. The dataframe's column names will match the column names
in this metadata. Categorical columns will be stored as
``dtype=object`` (containing strings), and numeric columns will be
stored as ``dtype=float``.
Returns
-------
pandas.DataFrame
Dataframe constructed from the metadata.
"""
return self._dataframe.copy()
def get_column(self, name):
"""Retrieve metadata column based on column name.
Parameters
----------
name : str
Name of the metadata column to retrieve.
Returns
-------
MetadataColumn
Requested metadata column (``CategoricalMetadataColumn`` or
``NumericMetadataColumn``).
See Also
--------
get_ids
"""
try:
series = self._dataframe[name]
except KeyError:
raise ValueError(
'%r is not a column in the metadata. Available columns: '
'%s' % (name, ', '.join(repr(c) for c in self.columns)))
return self._metadata_column_factory(series)
def get_ids(self, where=None):
"""Retrieve IDs matching search criteria.
Parameters
----------
where : str, optional
SQLite WHERE clause specifying criteria IDs must meet to be
included in the results. All IDs are included by default.
Returns
-------
set
IDs matching search criteria specified in `where`.
See Also
--------
ids
filter_ids
get_column
Notes
-----
The ID header (``Metadata.id_header``) may be used in the `where`
clause to query the table's ID column.
"""
if where is None:
return set(self._ids)
conn = sqlite3.connect(':memory:')
conn.row_factory = lambda cursor, row: row[0]
# https://github.com/pandas-dev/pandas/blob/
# 7c7bd569ce8e0f117c618d068e3d2798134dbc73/pandas/io/sql.py#L1306
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', 'The spaces in these column names will not.*')
self._dataframe.to_sql('metadata', conn, index=True,
index_label=self.id_header)
c = conn.cursor()
# In general we wouldn't want to format our query in this way because
# it leaves us open to sql injection, but it seems acceptable here for
# a few reasons:
# 1) This is a throw-away database which we're just creating to have
# access to the query language, so any malicious behavior wouldn't
# impact any data that isn't temporary
# 2) The substitution syntax recommended in the docs doesn't allow
# us to specify complex `where` statements, which is what we need to
# do here. For example, we need to specify things like:
# WHERE Subject='subject-1' AND SampleType='gut'
        # but the qmark/named-style syntaxes only support substitution of
# variables, such as:
# WHERE Subject=?
# 3) sqlite3.Cursor.execute will only execute a single statement so
# inserting multiple statements
# (e.g., "Subject='subject-1'; DROP...") will result in an
# OperationalError being raised.
query = ('SELECT "{0}" FROM metadata WHERE {1} GROUP BY "{0}" '
'ORDER BY "{0}";'.format(self.id_header, where))
try:
c.execute(query)
except sqlite3.OperationalError as e:
conn.close()
raise ValueError("Selection of IDs failed with query:\n %s\n\n"
"If one of the metadata column names specified "
"in the `where` statement is on this list "
"of reserved keywords "
"(http://www.sqlite.org/lang_keywords.html), "
"please ensure it is quoted appropriately in the "
"`where` statement." % query) from e
ids = set(c.fetchall())
conn.close()
return ids
def merge(self, *others):
"""Merge this ``Metadata`` object with other ``Metadata`` objects.
Returns a new ``Metadata`` object containing the merged contents of
this ``Metadata`` object and `others`. The merge is not in-place and
will always return a **new** merged ``Metadata`` object.
The merge will include only those IDs that are shared across **all**
``Metadata`` objects being merged (i.e. the merge is an *inner join*).
Each metadata column being merged must have a unique name; merging
metadata with overlapping column names will result in an error.
Parameters
----------
others : tuple
One or more ``Metadata`` objects to merge with this ``Metadata``
object.
Returns
-------
Metadata
New object containing merged metadata. The merged IDs will be in
the same relative order as the IDs in this ``Metadata`` object
after performing the inner join. The merged column order will match
the column order of ``Metadata`` objects being merged from left to
right.
Raises
------
ValueError
If zero ``Metadata`` objects are provided in `others` (there is
nothing to merge in this case).
Notes
-----
The merged ``Metadata`` object will always have its ``id_header``
property set to ``'id'``, regardless of the ``id_header`` values on the
``Metadata`` objects being merged.
The merged ``Metadata`` object tracks all source artifacts that it was
built from to preserve provenance (i.e. the ``.artifacts`` property
on all ``Metadata`` objects is merged).
"""
if len(others) < 1:
raise ValueError(
"At least one Metadata object must be provided to merge into "
"this Metadata object (otherwise there is nothing to merge).")
dfs = []
columns = []
artifacts = []
for md in itertools.chain([self], others):
df = md._dataframe
dfs.append(df)
columns.extend(df.columns.tolist())
artifacts.extend(md.artifacts)
columns = pd.Index(columns)
if columns.has_duplicates:
raise ValueError(
"Cannot merge metadata with overlapping columns. The "
"following columns overlap: %s" %
', '.join([repr(e) for e in
columns[columns.duplicated()].unique()]))
merged_df = dfs[0].join(dfs[1:], how='inner')
# Not using DataFrame.empty because empty columns are allowed in
# Metadata.
if merged_df.index.empty:
raise ValueError(
"Cannot merge because there are no IDs shared across metadata "
"objects.")
merged_df.index.name = 'id'
merged_md = self.__class__(merged_df)
merged_md._add_artifacts(artifacts)
return merged_md
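# Illustrative sketch (hypothetical data, pandas imported as pd): the merge
# keeps only IDs shared by every object being merged, e.g.
#     md1 = Metadata(pd.DataFrame({'col1': ['a', 'b']},
#                    index=pd.Index(['S1', 'S2'], name='id')))
#     md2 = Metadata(pd.DataFrame({'col2': [1.0, 2.0]},
#                    index=pd.Index(['S2', 'S3'], name='id')))
#     md1.merge(md2).get_ids()  # -> {'S2'} (inner join on IDs)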
def filter_ids(self, ids_to_keep):
"""Filter metadata by IDs.
Parameters
----------
ids_to_keep : iterable of str
IDs that should be retained in the filtered ``Metadata`` object. If
any IDs in `ids_to_keep` are not contained in this ``Metadata``
object, a ``ValueError`` will be raised. The filtered ``Metadata``
object will retain the same relative ordering of IDs in this
``Metadata`` object. Thus, the ordering of IDs in `ids_to_keep`
does not determine the ordering of IDs in the filtered ``Metadata``
object.
Returns
-------
Metadata
The metadata filtered by IDs.
See Also
--------
get_ids
filter_columns
"""
filtered_df = self._filter_ids_helper(self._dataframe, self.get_ids(),
ids_to_keep)
filtered_md = self.__class__(filtered_df)
filtered_md._add_artifacts(self.artifacts)
return filtered_md
def filter_columns(self, *, column_type=None, drop_all_unique=False,
drop_zero_variance=False, drop_all_missing=False):
"""Filter metadata by columns.
Parameters
----------
column_type : str, optional
If supplied, will retain only columns of this type. The currently
supported column types are 'numeric' and 'categorical'.
drop_all_unique : bool, optional
If ``True``, columns that contain a unique value for every ID will
be dropped. Missing data (``np.nan``) are ignored when determining
unique values. If a column consists solely of missing data, it will
be dropped.
drop_zero_variance : bool, optional
If ``True``, columns that contain the same value for every ID will
be dropped. Missing data (``np.nan``) are ignored when determining
variance. If a column consists solely of missing data, it will be
dropped.
drop_all_missing : bool, optional
If ``True``, columns that have a missing value (``np.nan``) for
every ID will be dropped.
Returns
-------
Metadata
The metadata filtered by columns.
See Also
--------
filter_ids
"""
if (column_type is not None and
column_type not in SUPPORTED_COLUMN_TYPES):
raise ValueError(
"Unknown column type %r. Supported column types: %s" %
(column_type, ', '.join(sorted(SUPPORTED_COLUMN_TYPES))))
# Build up a set of columns to drop. Short-circuit as soon as we know a
# given column can be dropped (no need to apply further filters to it).
columns_to_drop = set()
for column, props in self.columns.items():
if column_type is not None and props.type != column_type:
columns_to_drop.add(column)
continue
series = self._dataframe[column]
if drop_all_unique or drop_zero_variance:
# Ignore nans in the unique count, and compare to the number of
# non-nan values in the series.
num_unique = series.nunique(dropna=True)
if drop_all_unique and num_unique == series.count():
columns_to_drop.add(column)
continue
# If num_unique == 0, the series was empty (all nans). If
# num_unique == 1, the series contained only a single unique
# value (ignoring nans).
if drop_zero_variance and num_unique < 2:
columns_to_drop.add(column)
continue
if drop_all_missing and series.isnull().all():
columns_to_drop.add(column)
continue
filtered_df = self._dataframe.drop(columns_to_drop, axis=1,
inplace=False)
filtered_md = self.__class__(filtered_df)
filtered_md._add_artifacts(self.artifacts)
return filtered_md
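# Illustrative sketch (hypothetical call): retaining only numeric columns
# while dropping columns that are entirely missing might look like
#     filtered = md.filter_columns(column_type='numeric',
#                                  drop_all_missing=True)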
class MetadataColumn(_MetadataBase, metaclass=abc.ABCMeta):
"""Abstract base class representing a single metadata column.
Concrete subclasses represent specific metadata column types, e.g.
``CategoricalMetadataColumn`` and ``NumericMetadataColumn``.
See the ``Metadata`` class docstring for details about ``Metadata`` and
``MetadataColumn`` objects, including a description of column types.
The main difference in constructing ``MetadataColumn`` vs ``Metadata``
objects is that ``MetadataColumn`` objects are constructed from a
``pandas.Series`` object instead of a ``pandas.DataFrame``. Otherwise, the
same restrictions, considerations, and data normalization are applied as
with ``Metadata`` objects.
"""
# Abstract, must be defined by subclasses.
type = None
@classmethod
@abc.abstractmethod
def _is_supported_dtype(cls, dtype):
"""
Contract: Return ``True`` if the series `dtype` is supported by this
object and can be handled appropriately by ``_normalize_``. Return
``False`` otherwise.
"""
raise NotImplementedError
@classmethod
@abc.abstractmethod
def _normalize_(cls, series):
"""
Contract: Return a copy of `series` that has been converted to the
appropriate internal dtype and has any other necessary normalization or
validation applied (e.g. missing value representations, disallowing
certain values, etc). Raise an error with a detailed error message if
the operation cannot be completed.
"""
raise NotImplementedError
@property
def name(self):
"""Metadata column name.
This property is read-only.
Returns
-------
str
Metadata column name.
"""
return self._series.name
def __init__(self, series):
if not isinstance(series, pd.Series):
raise TypeError(
"%s constructor requires a pandas.Series object, not %r" %
(self.__class__.__name__, type(series)))
super().__init__(series.index)
if not self._is_supported_dtype(series.dtype):
raise TypeError(
"%s %r does not support a pandas.Series object with dtype %s" %
(self.__class__.__name__, series.name, series.dtype))
self._series = self._normalize_(series)
self._validate_index([self._series.name], axis='column')
def __repr__(self):
"""String summary of the metadata column."""
return '<%s name=%r id_count=%d>' % (self.__class__.__name__,
self.name, self.id_count)
def __eq__(self, other):
"""Determine if this metadata column is equal to another.
``MetadataColumn`` objects are equal if their IDs, column names, column
types, ID headers, source artifacts, and metadata values are equal.
Parameters
----------
other : MetadataColumn
Metadata column to test for equality.
Returns
-------
bool
Indicates whether this ``MetadataColumn`` object is equal to
`other`.
See Also
--------
__ne__
"""
return (
super().__eq__(other) and
self.name == other.name and
self._series.equals(other._series)
)
def __ne__(self, other):
"""Determine if this metadata column is not equal to another.
``MetadataColumn`` objects are not equal if their IDs, column names,
column types, ID headers, source artifacts, or metadata values are not
equal.
Parameters
----------
other : MetadataColumn
Metadata column to test for inequality.
Returns
-------
bool
Indicates whether this ``MetadataColumn`` object is not equal to
`other`.
See Also
--------
__eq__
"""
return not (self == other)
def save(self, filepath):
"""Save a TSV metadata file containing this metadata column.
The TSV metadata file format is described at https://docs.qiime2.org in
the Metadata Tutorial.
The file will always include the ``#q2:types`` directive in order to
make the file roundtrippable without relying on column type inference.
Parameters
----------
filepath : str
Path to save TSV metadata file at.
"""
from .io import MetadataWriter
MetadataWriter(self).write(filepath)
def to_series(self):
"""Create a pandas series from the metadata column.
The series index name (``Index.name``) will match this metadata
column's ``id_header``, and the index will contain this metadata
column's IDs. The series name will match this metadata column's name.
Returns
-------
pandas.Series
Series constructed from the metadata column.
See Also
--------
to_dataframe
"""
return self._series.copy()
def to_dataframe(self):
"""Create a pandas dataframe from the metadata column.
The dataframe will contain exactly one column. The dataframe's index
name (``Index.name``) will match this metadata column's ``id_header``,
and the index will contain this metadata column's IDs. The dataframe's
column name will match this metadata column's name.
Returns
-------
pandas.DataFrame
Dataframe constructed from the metadata column.
See Also
--------
to_series
"""
return self._series.to_frame()
def get_value(self, id):
"""Retrieve metadata column value associated with an ID.
Parameters
----------
id : str
ID corresponding to the metadata column value to retrieve.
Returns
-------
object
Value associated with the provided `id`.
"""
if id not in self._series.index:
raise ValueError("ID %r is not present in %r" % (id, self))
return self._series.loc[id]
def has_missing_values(self):
"""Determine if the metadata column has one or more missing values.
Returns
-------
bool
``True`` if the metadata column has one or more missing values
(``np.nan``), ``False`` otherwise.
See Also
--------
drop_missing_values
get_ids
"""
return len(self.get_ids(where_values_missing=True)) > 0
def drop_missing_values(self):
"""Filter out missing values from the metadata column.
Returns
-------
MetadataColumn
Metadata column with missing values removed.
See Also
--------
has_missing_values
get_ids
"""
missing = self.get_ids(where_values_missing=True)
present = self.get_ids() - missing
return self.filter_ids(present)
def get_ids(self, where_values_missing=False):
"""Retrieve IDs matching search criteria.
Parameters
----------
where_values_missing : bool, optional
If ``True``, only return IDs that are associated with missing
values (``np.nan``). If ``False`` (the default), return all IDs in
the metadata column.
Returns
-------
set
IDs matching search criteria.
See Also
--------
ids
filter_ids
has_missing_values
drop_missing_values
"""
if where_values_missing:
ids = self._series[self._series.isnull()].index
else:
ids = self._ids
return set(ids)
def filter_ids(self, ids_to_keep):
"""Filter metadata column by IDs.
Parameters
----------
ids_to_keep : iterable of str
IDs that should be retained in the filtered ``MetadataColumn``
object. If any IDs in `ids_to_keep` are not contained in this
``MetadataColumn`` object, a ``ValueError`` will be raised. The
filtered ``MetadataColumn`` object will retain the same relative
ordering of IDs in this ``MetadataColumn`` object. Thus, the
ordering of IDs in `ids_to_keep` does not determine the ordering of
IDs in the filtered ``MetadataColumn`` object.
Returns
-------
MetadataColumn
The metadata column filtered by IDs.
See Also
--------
get_ids
"""
filtered_series = self._filter_ids_helper(self._series, self.get_ids(),
ids_to_keep)
filtered_mdc = self.__class__(filtered_series)
filtered_mdc._add_artifacts(self.artifacts)
return filtered_mdc
class CategoricalMetadataColumn(MetadataColumn):
"""A single metadata column containing categorical data.
See the ``Metadata`` class docstring for details about ``Metadata`` and
``MetadataColumn`` objects, including a description of column types and
supported data formats.
"""
type = 'categorical'
@classmethod
def _is_supported_dtype(cls, dtype):
return dtype == 'object'
@classmethod
def _normalize_(cls, series):
def normalize(value):
if isinstance(value, str):
value = value.strip()
if value == '':
raise ValueError(
"%s does not support empty strings as values. Use an "
"appropriate pandas missing value type "
"(e.g. `numpy.nan`) or supply a non-empty string as "
"the value in column %r." %
(cls.__name__, series.name))
else:
return value
elif pd.isnull(value): # permits np.nan, Python float nan, None
return np.nan
else:
raise TypeError(
"%s only supports strings or missing values. Found value "
"%r of type %r in column %r." %
(cls.__name__, value, type(value), series.name))
norm_series = series.apply(normalize, convert_dtype=False)
norm_series.index = norm_series.index.str.strip()
norm_series.name = norm_series.name.strip()
return norm_series
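# Illustrative sketch (hypothetical series, pandas imported as pd):
# categorical normalization strips surrounding whitespace and maps missing
# values to np.nan, e.g.
#     s = pd.Series([' gut ', None], name='SampleType',
#                   index=pd.Index(['S1', 'S2'], name='id'))
#     CategoricalMetadataColumn(s).to_series().tolist()  # -> ['gut', nan]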
class NumericMetadataColumn(MetadataColumn):
"""A single metadata column containing numeric data.
See the ``Metadata`` class docstring for details about ``Metadata`` and
``MetadataColumn`` objects, including a description of column types and
supported data formats.
"""
type = 'numeric'
@classmethod
def _is_supported_dtype(cls, dtype):
return dtype == 'float' or dtype == 'int'
@classmethod
def _normalize_(cls, series):
series = series.astype(float, copy=True, errors='raise')
if np.isinf(series).any():
raise ValueError(
"%s does not support positive or negative infinity as a "
"floating point value in column %r." %
(cls.__name__, series.name))
return series
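# Illustrative sketch (hypothetical series): numeric columns are coerced to
# float and infinities are rejected, e.g. a pandas Series of ints named
# 'depth' indexed by 'id' becomes a float64 column after normalization.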
| bsd-3-clause |
JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/probability.py | 7 | 87564 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (additions)
# Trevor Cohn <[email protected]> (additions)
# Peter Ljunglöf <[email protected]> (additions)
# Liang Dong <[email protected]> (additions)
# Geoffrey Sampson <[email protected]> (additions)
# Ilia Kurenkov <[email protected]> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals, division
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist[word.lower()] += 1
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
Counter.__init__(self, samples)
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return sum(self.values())
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
return self.r_Nr(bins)[r]
def r_Nr(self, bins=None):
"""
Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: dict
"""
_r_Nr = defaultdict(int)
for count in self.values():
_r_Nr[count] += 1
# Special case for Nr[0]:
_r_Nr[0] = bins - self.B() if bins is not None else 0
return _r_Nr
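# Illustrative sketch: for FreqDist('abracadabra') the counts are
# {'a': 5, 'b': 2, 'r': 2, 'c': 1, 'd': 1}, so r_Nr() maps 1 -> 2 (c, d),
# 2 -> 2 (b, r) and 5 -> 1 (a), with Nr(0) = 0 when `bins` is not given.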
def _cumulative_frequencies(self, samples):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: list(float)
"""
cf = 0.0
for sample in samples:
cf += self[sample]
yield cf
# Slightly odd nomenclature: FreqDist stores counts and ProbDist stores
# probabilities, yet freq() here returns a probability (a relative frequency).
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self.N() == 0:
return 0
return self[sample] / self.N()
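# Illustrative sketch: continuing the FreqDist('abracadabra') example,
# freq('a') is 5/11, the count of 'a' divided by the 11 recorded outcomes.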
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, a ValueError is raised.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type cumulative: bool
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed. '
'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution,
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
tabulated.
:param samples: The samples to plot (default is all samples)
:type samples: list
:param cumulative: A flag to specify whether the freqs are cumulative (default = False)
:type cumulative: bool
"""
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
width = max(len("%s" % s) for s in samples)
width = max(width, max(len("%d" % f) for f in freqs))
for i in range(len(samples)):
print("%*s" % (width, samples[i]), end=' ')
print()
for i in range(len(samples)):
print("%*d" % (width, freqs[i]), end=' ')
print()
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
# Mathematical operators
def __add__(self, other):
"""
Add counts from two counters.
>>> FreqDist('abbb') + FreqDist('bcc')
FreqDist({'b': 4, 'c': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__add__(other))
def __sub__(self, other):
"""
Subtract count, but keep only results with positive counts.
>>> FreqDist('abbbc') - FreqDist('bccd')
FreqDist({'b': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__sub__(other))
def __or__(self, other):
"""
Union is the maximum of value in either of the input counters.
>>> FreqDist('abbb') | FreqDist('bcc')
FreqDist({'b': 3, 'c': 2, 'a': 1})
"""
return self.__class__(super(FreqDist, self).__or__(other))
def __and__(self, other):
"""
Intersection is the minimum of corresponding counts.
>>> FreqDist('abbb') & FreqDist('bcc')
FreqDist({'b': 1})
"""
return self.__class__(super(FreqDist, self).__and__(other))
def __le__(self, other):
if not isinstance(other, FreqDist):
raise_unorderable_types("<=", self, other)
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ge__ = lambda self, other: not self <= other or self == other
__lt__ = lambda self, other: self <= other and not self == other
__gt__ = lambda self, other: not self <= other
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return self.pformat()
def pprint(self, maxlen=10, stream=None):
"""
Print a string representation of this FreqDist to 'stream'
:param maxlen: The maximum number of items to print
:type maxlen: int
:param stream: The stream to print to. stdout by default
"""
print(self.pformat(maxlen=maxlen), file=stream)
def pformat(self, maxlen=10):
"""
Return a string representation of this FreqDist.
:param maxlen: The maximum number of items to display
:type maxlen: int
:rtype: string
"""
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
return (math.log(p, 2) if p != 0 else _NINF)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
p_init = p
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, p_init-p))
return random.choice(list(self.samples()))
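# Illustrative sketch: every concrete ProbDist exposes this interface, e.g.
# with the MLE estimate defined later in this module:
#     pd_ = MLEProbDist(FreqDist('abracadabra'))
#     pd_.prob('a')    # 5/11
#     pd_.generate()   # returns 'a' with probability 5/11, etc.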
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
return (self._prob if sample in self._sampleset else 0)
def max(self):
return self._samples[0]
def samples(self):
return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
"""
Generates a random probability distribution whereby each sample
is assigned a probability drawn uniformly at random, with the
probabilities then normalized so that they sum to one.
"""
def __init__(self, samples):
if len(samples) == 0:
raise ValueError('A probability distribution must '+
'have at least one sample.')
self._probs = self.unirand(samples)
self._samples = list(self._probs.keys())
@classmethod
def unirand(cls, samples):
"""
The key function that creates a randomized initial distribution
that still sums to 1. Set as a dictionary of prob values so that
it can still be passed to MutableProbDist and called with identical
syntax to UniformProbDist
"""
randrow = [random.random() for i in range(len(samples))]
total = sum(randrow)
for i, x in enumerate(randrow):
randrow[i] = x/total
total = sum(randrow)
if total != 1:
#this difference, if present, is so small (near NINF) that it
#can be subtracted from any element without risking probs not (0 1)
randrow[-1] -= total - 1
return dict((s, randrow[i]) for i, s in enumerate(samples))
def prob(self, sample):
return self._probs.get(sample, 0)
def samples(self):
return self._samples
def __repr__(self):
return '<RandomUniformProbDist with %d samples>' %len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
self._log = log
# Normalize the distribution, if requested.
if normalize:
if len(prob_dict) == 0:
raise ValueError('A DictionaryProbDist must have at least one sample ' +
'before it can be normalized.')
if log:
value_sum = sum_logs(list(self._prob_dict.values()))
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
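# Illustrative sketch: DictionaryProbDist can renormalize raw weights, e.g.
# DictionaryProbDist({'a': 2, 'b': 1, 'c': 1}, normalize=True) assigns
# prob('a') == 0.5 and prob('b') == prob('c') == 0.25.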
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None:
bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
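# Illustrative sketch: with FreqDist('abracadabra') (N=11, B=5) and
# gamma=0.5, prob('a') = (5 + 0.5) / (11 + 5*0.5) = 5.5/13.5, while an
# unseen sample receives 0.5/13.5.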
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
r_Nr = base_fdist.r_Nr(bins)
Nr = [r_Nr[r] for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
In the special case that *N[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
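# Illustrative sketch: with base FreqDist('aab') and heldout FreqDist('abb'),
# samples occurring once in the base ('b') have Tr[1]=2, Nr[1]=1, N=3, so
# their estimate is 2/(1*3); samples occurring twice ('a') have Tr[2]=1,
# Nr[2]=1, giving 1/(1*3).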
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distribution.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([list(fd) for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalizing factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / (self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
return (c / (self._N + self._T) if c != 0 else self._P0)
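# Illustrative sketch: for FreqDist('abracadabra') (N=11, T=B=5) with
# bins=26, Z = 26 - 5 = 21; a seen sample such as 'a' gets 5/(11+5) and
# each unseen bin gets P0 = 5 / (21 * (11+5)).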
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good during their collaboration in
# WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be the balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think of the count of unseen events as
# the count of frequency one (see Jurafsky & Martin 2nd Edition, p101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is greater than 1.96 times the standard deviation, and to switch to
# r* once it is less than or equal to that threshold:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations use a coefficient of 1.65 for a 0.1
# significance criterion.
#
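# A small worked example (illustrative): if a corpus yields N(1)=3 event
# types seen once and N(2)=1 type seen twice, the adjusted count for a
# singleton is c* = (1+1) * N(2)/N(1) = 2/3, and the total probability
# mass reserved for unseen events is N(1)/N.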
##//////////////////////////////////////////////////////
## Simple Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuringProbDist approximates the mapping from frequency to
frequency of frequencies by a straight line in log space, fitted by
linear regression.
Details of the Simple Good-Turing algorithm can be found in:
- "Good-Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2, pp. 217-237.
- "Speech and Language Processing" (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pairs (xi, yi), where xi denotes the frequency and
yi denotes the frequency of frequency, we want to minimize their
squared variation. E(x) and E(y) represent the means of xi and yi.
- slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b * E(x)
"""
SUM_TO_ONE = False
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist.B() + 1``.
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr_non_zero(self):
r_Nr = self._freqdist.r_Nr()
del r_Nr[0]
return r_Nr
def _r_Nr(self):
"""
Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0.
"""
nonzero = self._r_Nr_non_zero()
if not nonzero:
return [], []
return zip(*sorted(nonzero.items()))
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
# For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
i = (r[j-1] if j > 0 else 0)
k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = sum(log_r) / len(log_r)
y_mean = sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
if self._slope >= -1:
warnings.warn('SimpleGoodTuring did not find a proper best fit '
'line for smoothing probabilities of occurrences. '
'The probability estimates are likely to be '
'unreliable.')
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the number of samples with count r.
:param r: The amount of frequency.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = self._freqdist.Nr(count+1)
Er = self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
# Sanity check: the mass reserved for unseen samples plus the
# renormalized mass of the seen samples should sum to (roughly) one.
prob_sum = self._prob_measure(0)
for r_, nr_ in zip(*self._r_Nr()):
prob_sum += nr_ * self._prob_measure(r_) * self._renormal
print("Probability Sum:", prob_sum)
#assert abs(prob_sum - 1.0) < 1e-6, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable array (indexed via a sample-to-position mapping) and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = array.array(str("d"), [0.0]) * len(samples)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return 0.0
return (2**(self._data[i]) if self._logs else self._data[i])
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return float('-inf')
return (self._data[i] if self._logs else math.log(self._data[i], 2))
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being a valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
self._data[i] = (prob if log else math.log(prob, 2))
else:
self._data[i] = (2**(prob) if log else prob)
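# A minimal usage sketch (illustrative only; assumes a UniformProbDist over
# three samples -- any ProbDistI instance would work the same way):
#
# >>> base = UniformProbDist(['a', 'b', 'c'])
# >>> mpd = MutableProbDist(base, ['a', 'b', 'c'])
# >>> mpd.update('a', 0.5, log=False)
# >>> mpd.update('b', 0.25, log=False)
# >>> mpd.update('c', 0.25, log=False)
# >>> mpd.prob('a')  # doctest: +SKIP
# 0.5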
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
"""
Kneser-Ney estimate of a probability distribution. This is a version of
back-off that counts how likely an n-gram is provided the n-1-gram had
been seen in training. Extends the ProbDistI interface and requires a trigram
FreqDist instance to train on. Optionally, a discount value different from the
default can be specified. The default discount is set to 0.75.
"""
def __init__(self, freqdist, bins=None, discount=0.75):
"""
:param freqdist: The trigram frequency distribution upon which to base
the estimation
:type freqdist: FreqDist
:param bins: Included for compatibility with nltk.tag.hmm
:type bins: int or float
:param discount: The discount applied when retrieving counts of
trigrams
:type discount: float (preferred, but can be set to int)
"""
if not bins:
self._bins = freqdist.B()
else:
self._bins = bins
self._D = discount
# cache for probability calculation
self._cache = {}
# internal bigram and trigram frequency distributions
self._bigrams = defaultdict(int)
self._trigrams = freqdist
# helper dictionaries used to calculate probabilities
self._wordtypes_after = defaultdict(float)
self._trigrams_contain = defaultdict(float)
self._wordtypes_before = defaultdict(float)
for w0, w1, w2 in freqdist:
self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
self._wordtypes_after[(w0,w1)] += 1
self._trigrams_contain[w1] += 1
self._wordtypes_before[(w1,w2)] += 1
def prob(self, trigram):
# sample must be a triple
if len(trigram) != 3:
raise ValueError('Expected an iterable with 3 members.')
trigram = tuple(trigram)
w0, w1, w2 = trigram
if trigram in self._cache:
return self._cache[trigram]
else:
# if the sample trigram was seen during training
if trigram in self._trigrams:
prob = (self._trigrams[trigram]
- self.discount())/self._bigrams[(w0, w1)]
# else if the 'rougher' environment was seen during training
elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
aftr = self._wordtypes_after[(w0, w1)]
bfr = self._wordtypes_before[(w1, w2)]
# the probability left over from alphas
leftover_prob = ((aftr * self.discount())
/ self._bigrams[(w0, w1)])
# the beta (including normalization)
beta = bfr /(self._trigrams_contain[w1] - aftr)
prob = leftover_prob * beta
# else the sample was completely unseen during training
else:
prob = 0.0
self._cache[trigram] = prob
return prob
def discount(self):
"""
Return the value by which counts are discounted. By default set to 0.75.
:rtype: float
"""
return self._D
def set_discount(self, discount):
"""
Set the value by which counts are discounted to the value of discount.
:param discount: the new value to discount counts by
:type discount: float (preferred, but int possible)
:rtype: None
"""
self._D = discount
def samples(self):
return self._trigrams.keys()
def max(self):
return self._trigrams.max()
def __repr__(self):
'''
Return a string representation of this ProbDist
:rtype: str
'''
return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
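# A minimal usage sketch (illustrative only; the toy corpus below is made up):
#
# >>> from nltk.util import ngrams
# >>> words = "the cat sat on the mat and the cat slept".split()
# >>> kn = KneserNeyProbDist(FreqDist(ngrams(words, 3)))
# >>> kn.prob(('the', 'cat', 'sat'))  # doctest: +SKIP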
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# This is the expected log-likelihood (base 2) of test_pdist under
# actual_pdist, i.e. the negative cross-entropy.
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = (pdist.prob(s) for s in pdist.samples())
return -sum(p * math.log(p,2) for p in probs)
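# Worked example (illustrative): for an MLE distribution over four equally
# frequent samples each p is 0.25, so entropy = -4 * 0.25 * log2(0.25) = 2 bits.
#
# >>> entropy(MLEProbDist(FreqDist({'a': 1, 'b': 1, 'c': 1, 'd': 1})))  # doctest: +SKIP
# 2.0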
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition][word] += 1
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
FreqDist({'the': 3, 'dog': 2, 'not': 1})
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond][sample] += 1
def __reduce__(self):
kv_pairs = ((cond, self[cond]) for cond in self.conditions())
return (self.__class__, (), None, None, kv_pairs)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return list(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in compat.itervalues(self))
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed. '
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param conditions: The conditions to plot (default is all)
:type conditions: list
:param cumulative: A flag to specify whether the freqs are cumulative (default = False)
:type cumulative: bool
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
width = max(len("%s" % s) for s in samples)
freqs = dict()
for c in conditions:
if cumulative:
freqs[c] = list(self[c]._cumulative_frequencies(samples))
else:
freqs[c] = [self[c][sample] for sample in samples]
width = max(width, max(len("%d" % f) for f in freqs[c]))
condition_size = max(len("%s" % c) for c in conditions)
print(' ' * condition_size, end=' ')
for s in samples:
print("%*s" % (width, s), end=' ')
print()
for c in conditions:
print("%*s" % (condition_size, c), end=' ')
for f in freqs[c]:
print("%*d" % (width, f), end=' ')
print()
# Mathematical operators
def __add__(self, other):
"""
Add counts from two ConditionalFreqDists.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] + other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count > 0:
result[cond][elem] = count
return result
def __sub__(self, other):
"""
Subtract counts, but keep only results with positive counts.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] - other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count < 0:
result[cond][elem] = 0 - count
return result
def __or__(self, other):
"""
Union is the maximum of value in either of the input counters.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] | other[cond]
if newfreqdist:
result[cond] = newfreqdist
for cond in other.conditions():
if cond not in self.conditions():
for elem, count in other[cond].items():
if count > 0:
result[cond][elem] = count
return result
def __and__(self, other):
"""
Intersection is the minimum of corresponding counts.
"""
if not isinstance(other, ConditionalFreqDist):
return NotImplemented
result = ConditionalFreqDist()
for cond in self.conditions():
newfreqdist = self[cond] & other[cond]
if newfreqdist:
result[cond] = newfreqdist
return result
# @total_ordering doesn't work here, since the class inherits from a builtin class
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<=", self, other)
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<", self, other)
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">=", self, other)
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">", self, other)
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return list(self.keys())
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modeling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.corpus import brown
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> cpdist['passed'].max()
'VBD'
>>> cpdist['passed'].prob('VBD')
0.423...
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
self._probdist_factory = probdist_factory
self._factory_args = factory_args
self._factory_kw_args = factory_kw_args
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
def __missing__(self, key):
self[key] = self._probdist_factory(FreqDist(),
*self._factory_args,
**self._factory_kw_args)
return self[key]
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
self.update(probdist_dict)
def __missing__(self, key):
self[key] = DictionaryProbDist()
return self[key]
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
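# Worked example: log2(0.25) == -2 for both inputs and 0.25 + 0.25 == 0.5,
# whose log2 is -1, so:
#
# >>> add_logs(-2.0, -2.0)  # doctest: +SKIP
# -1.0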
def sum_logs(logs):
return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1 + numsamples) // 2) +
random.randint(0, numsamples // 2))
fdist[y] += 1
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1 + numsamples) // 2 + 1):
for y in range(0, numsamples // 2 + 1):
fdist[x+y] += 1
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
distributions by sampling a random process that has ``numsamples``
possible samples; each frequency distribution records ``numoutcomes``
outcomes. These three frequency distributions are
then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
sgt = SimpleGoodTuringProbDist(fd)
print('%18s %8s %14s' \
% ("word", "freqency", "SimpleGoodTuring"))
fd_keys_sorted = (key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
for key in fd_keys_sorted:
print('%18s %8d %14e' \
% (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
|
gpl-3.0
|
kirangonella/BuildingMachineLearningSystemsWithPython
|
ch12/image-classification.py
|
21
|
3109
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import mahotas as mh
import numpy as np
from glob import glob
from jug import TaskGenerator
# We need to use the `features` module from chapter 10.
from sys import path
path.append('../ch10')
# This is the jug-enabled version of the script ``figure18.py`` in Chapter 10
basedir = '../SimpleImageDataset/'
@TaskGenerator
def compute_texture(im):
'''Compute features for an image
Parameters
----------
im : str
filepath for image to process
Returns
-------
fs : ndarray
1-D array of features
'''
from features import texture
imc = mh.imread(im)
return texture(mh.colors.rgb2grey(imc))
@TaskGenerator
def chist(fname):
from features import color_histogram
im = mh.imread(fname)
return color_histogram(im)
@TaskGenerator
def compute_lbp(fname):
from mahotas.features import lbp
imc = mh.imread(fname)
im = mh.colors.rgb2grey(imc)
return lbp(im, radius=8, points=6)
@TaskGenerator
def accuracy(features, labels):
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import cross_validation
# We use logistic regression because it is very fast.
# Feel free to experiment with other classifiers
clf = Pipeline([('preproc', StandardScaler()),
('classifier', LogisticRegression())])
cv = cross_validation.LeaveOneOut(len(features))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
return scores.mean()
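# Hypothetical variant (not part of the original script): the same pipeline can
# be re-run with a different classifier by swapping the 'classifier' step, e.g.
#
# from sklearn.svm import LinearSVC
# clf = Pipeline([('preproc', StandardScaler()),
#                 ('classifier', LinearSVC())])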
@TaskGenerator
def print_results(scores):
with open('results.image.txt', 'w') as output:
for k,v in scores:
output.write('Accuracy (LOO x-val) with Logistic Regression [{0}]: {1:.1%}\n'.format(
k, v.mean()))
to_array = TaskGenerator(np.array)
hstack = TaskGenerator(np.hstack)
haralicks = []
chists = []
lbps = []
labels = []
# Use glob to get all the images
images = glob('{0}/*.jpg'.format(basedir))
for fname in sorted(images):
haralicks.append(compute_texture(fname))
chists.append(chist(fname))
lbps.append(compute_lbp(fname))
labels.append(fname[:-len('00.jpg')]) # The class is encoded in the filename as xxxx00.jpg
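# e.g. (illustrative filename) '../SimpleImageDataset/building00.jpg' is mapped
# to the label '../SimpleImageDataset/building': the trailing two digits and
# the '.jpg' extension are stripped.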
haralicks = to_array(haralicks)
chists = to_array(chists)
lbps = to_array(lbps)
labels = to_array(labels)
scores_base = accuracy(haralicks, labels)
scores_chist = accuracy(chists, labels)
scores_lbps = accuracy(lbps, labels)
combined = hstack([chists, haralicks])
scores_combined = accuracy(combined, labels)
combined_all = hstack([chists, haralicks, lbps])
scores_combined_all = accuracy(combined_all, labels)
print_results([
('base', scores_base),
('chists', scores_chist),
('lbps', scores_lbps),
('combined' , scores_combined),
('combined_all' , scores_combined_all),
])
|
mit
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/tests/test_optimize_dataframe.py
|
4
|
1688
|
import pytest
from operator import getitem
from toolz import merge
import dask
from dask.dataframe.io import dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
bcolz = pytest.importorskip('bcolz')
bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
for cols in [None, 'abc', ['abc']]:
dsk2 = merge(dict((('x', i),
(dataframe_from_ctable, bc, slice(0, 2), cols, {}))
for i in [1, 2, 3]),
dict((('y', i),
(getitem, ('x', i), ['a', 'b']))
for i in [1, 2, 3]))
expected = dict((('y', i), (dataframe_from_ctable,
bc, slice(0, 2), ['a', 'b'], {}))
for i in [1, 2, 3])
result = dd.optimize(dsk2, [('y', i) for i in [1, 2, 3]])
assert result == expected
def test_fuse_ave_width():
df = pd.DataFrame({'x': range(10)})
df = dd.from_pandas(df, npartitions=5)
s = ((df.x + 1) + (df.x + 2))
with dask.config.set(fuse_ave_width=4):
a = s.__dask_optimize__(s.dask, s.__dask_keys__())
b = s.__dask_optimize__(s.dask, s.__dask_keys__())
assert len(a) < len(b)
assert len(a) <= 15
|
gpl-3.0
|
Averroes/statsmodels
|
statsmodels/datasets/statecrime/data.py
|
25
|
3128
|
#! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
Calculated from the 2009 1-year American Community Survey obtained
from Census. Variable is Male householder, no wife present, family
household combined with Female householder, no husband present, family
household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
Areas are areas of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
dtype=float, index_idx=0)
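# Illustrative usage sketch (assumes the standard statsmodels Dataset layout;
# not part of the original module):
#
# >>> from statsmodels.datasets import statecrime
# >>> data = statecrime.load_pandas()
# >>> data.endog.head()    # murder rate per 100,000 (endog_idx=2)
# >>> data.exog.columns    # urban, poverty, hs_grad, single (exog_idx=[7, 4, 3, 5])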
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
delimiter=",", names=True, dtype=None)
return data
|
bsd-3-clause
|
mojoboss/scikit-learn
|
sklearn/ensemble/tests/test_bagging.py
|
127
|
25365
|
"""
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives an ensemble of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test that a warm-started second fit with a smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
|
bsd-3-clause
|
jgdo/arips_ros
|
door_handle_detection/scripts/train.py
|
1
|
5591
|
import glob
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models, activations, optimizers
import matplotlib.pyplot as plt
from tensorflow.keras.utils import get_custom_objects
from annot_utils import *
import dataset
from dataset import loadAllData, denormalizeCoords
import heatmap_model
all_images, all_labels = loadAllData()
train_gen = dataset.DoorDataGenerator(all_images, all_labels, train=True)
test_gen = dataset.DoorDataGenerator(all_images, all_labels, train=False, shuffle=False)
model_path = "mymodel"
"""
# my_activation = layers.LeakyReLU
my_activation = layers.ReLU
# TODO load dataset
model = models.Sequential()
model.add(layers.Conv2D(32, (5, 5), use_bias=True, input_shape=(240, 320, 5)))
model.add(layers.BatchNormalization())
model.add(my_activation())
#model.add(layers.Dropout(0.05))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
#model.add(layers.Dropout(0.1))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
#model.add(layers.Dropout(0.15))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(256, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
model.add(layers.Conv2D(128, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
model.add(layers.Conv2D(64, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
model.add(layers.Conv2D(32, (3, 3), use_bias=True))
model.add(layers.BatchNormalization())
model.add(my_activation())
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(5, activation=None))
def my_loss_function(y_true, y_pred):
squared_difference = tf.square(y_true[:, 0:4] - y_pred[:, 0:4])
# print("squared_difference: ", squared_difference)
position_loss = tf.reduce_mean(squared_difference, axis=-1) * y_true[:, 4]
#print("position_loss: ", position_loss)
loss = position_loss + tf.square((y_true[:, 4]*2-1) - y_pred[:, 4]) * 0.25
#print("loss: ", loss)
return loss
get_custom_objects().update({"my_loss_function": my_loss_function})
"""
model = heatmap_model.DoorHandleHeatmapModel()
model.build((1, 240, 320, 3))
model.summary()
model.compile(optimizer=optimizers.Adam(learning_rate=0.0005),
loss='mse')
if 0:
model = models.load_model(model_path)
if 1:
try:
my_callbacks = [
tf.keras.callbacks.EarlyStopping(patience=20),
]
history = model.fit(train_gen, epochs=200,
validation_data=test_gen, callbacks=my_callbacks)
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.xlabel('Epoch')
        plt.ylabel('Loss')
# plt.ylim([0.0, 1])
plt.legend(loc='lower right')
plt.show()
except KeyboardInterrupt:
pass
# model.save(model_path, ca)
def predictImage(path):
x = np.linspace(-1, 1, 320)
y = np.linspace(-1, 1, 240)
xv, yv = np.meshgrid(x, y)
xv = np.expand_dims(xv, axis=2)
yv = np.expand_dims(yv, axis=2)
img = cv2.imread(path)
img_input = np.concatenate([img.astype(np.float32) / 255.0, xv, yv], axis=2)
img_input = np.expand_dims(img_input, axis=0)
labels = model.predict(img_input)[0]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if labels[4] > 0.5:
cv2.circle(img, (denormalizeCoords(labels[0], 320), (denormalizeCoords(labels[1], 240))), 3, (255, 0, 0), -1)
cv2.circle(img, (denormalizeCoords(labels[2], 320), (denormalizeCoords(labels[3], 240))), 3, (0, 255, 0), -1)
else:
cv2.circle(img, (img.shape[1] // 2, img.shape[0] // 2), 30, (255, 255, 0), 1)
plt.imshow(img)
plt.show()
# predictImage('/home/jgdo/frame0000.jpg')
"""
test_gen.get_with_indices = True
best_images = np.ndarray((0, 240, 320, 5), dtype=np.float32)
best_indices = []
best_losses = []
for i_b in range(len(test_gen)):
images, gt_labels, indices = test_gen[i_b]
pred_labels = model.predict(images)
loss = my_loss_function(gt_labels, pred_labels).numpy()
best_images = np.append(best_images, images, axis=0)
best_indices += indices.tolist()
best_losses = np.append(best_losses, loss, axis=0)
assert len(best_images) == len(best_indices)
assert len(best_losses) == len(best_indices)
best_indices.sort(key=lambda idx: best_losses[idx])
best_indices.reverse()
for best_index in range(20):
idx = best_indices[best_index]
labels = model.predict(best_images[idx:idx + 1])[0]
img = best_images[idx, :, :, 0:3].copy()
img = (img[:, :, ::-1] * 255).astype(np.uint8)
if labels[4] > 0.5:
cv2.circle(img, (denormalizeCoords(labels[0], 320), (denormalizeCoords(labels[1], 240))), 3, (255, 0, 0), -1)
cv2.circle(img, (denormalizeCoords(labels[2], 320), (denormalizeCoords(labels[3], 240))), 3, (0, 255, 0), -1)
else:
cv2.circle(img, (img.shape[1] // 2, img.shape[0] // 2), 30, (255, 255, 0), 1)
plt.imshow(img)
plt.show()
"""
if 1:
test_batch_images, test_batch_labels = test_gen[0]
for test_index in range(len(test_batch_images)):
labels = model.predict(test_batch_images[test_index:test_index+1])[0]
dataset.showImageLabels(test_batch_images[test_index], labels, False)
|
gpl-2.0
|
tgbugs/pyontutils
|
ilxutils/tools.py
|
1
|
10210
|
import json
import pickle
import re
import pandas as pd
from pathlib import Path as p
import pprint
from subprocess import call
from sys import exit
import csv
from typing import Union, Dict, List
import networkx as nx
def sort_list_of_tuples_by_string(list_of_tuples:List[tuple], string_index:int) -> List[tuple]:
return sorted(list_of_tuples, key=lambda x: (str(x[string_index]).strip() in ['None', ''], x[string_index].lower()))
def class_hierarchy(dag:List[tuple], descending=True) -> list:
''' Topological Sorting
Args:
dag: directed acyclic graph that is a mappings of list of tuples with len of 2
Returns:
Ordered list of single entities in topological ordering of choice
Examples:
>>> class_hierarchy([(1, 2), (2, 3)])
[3, 2, 1]
'''
dag = nx.DiGraph(dag)
dag = list(nx.topological_sort(dag))
if descending:
dag = list(reversed(dag))
return dag
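# Added illustrative sketch (not part of the original module): a toy edge list
# shows the ordering class_hierarchy returns, deepest class first by default.
def _example_class_hierarchy():
    toy_dag = [('entity', 'organism'), ('organism', 'neuron')]
    assert class_hierarchy(toy_dag) == ['neuron', 'organism', 'entity']
    assert class_hierarchy(toy_dag, descending=False) == ['entity', 'organism', 'neuron']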
def clean(self, string, clean_scale:int=0):
if clean_scale == 0:
return string
elif clean_scale == 1:
return string.lower().strip()
elif clean_scale == 2:
return ' '.join(string_profiler(string))
elif clean_scale == 3:
return ' '.join(string_profiler(string)).replace('obsolete', '').strip()
def string_profiler(
string: str,
start_delimiter: str='(',
end_delimiter: str=')',
remove: bool=True,
keep_delimiter: bool = True,
) -> List[str]:
'''
    Separates string fragments into a list based on the start and end delimiters
Args:
string: complete string you want to be broken up based on start and stop delimiters given
start_delimiter: delimiter element to start
        end_delimiter: delimiter element to end
remove: decide whether or not to keep strings inside the delimiters
Returns:
List[str]: list of strings that are split at start and end delimiters given and whether
or not you want to remove the string inside the delimiters
Tests:
long = '(life is is good) love world "(blah) blah" "here I am" once again "yes" blah '
print(string_profiler(long))
null = ''
print(string_profiler(null))
short = '(life love) yes(and much more)'
print(string_profiler(short))
short = 'yes "life love"'
print(string_profiler(short))
'''
    outer_index = 0 # stepper for outer delimiter string elements
    inner_index = 0 # stepper for inner delimiter string elements
curr_index = 0 # actual index of the current element in the string
string_list = [] # string broken up into individual elements whenever a start and end delimiter is hit
outer_string = '' # string tracked while outside the delimiters
inner_string = '' # string tracked while inside the delimiters
for outer_index in range(len(string)):
# Actual pointer position (inner delimiter counter + outer delimiter counter)
curr_index = inner_index + outer_index
        # Close once the actual index is at the end
# NOTE: outer_index will keep going till end regardless of hitting a delimiter and adding to inner stepper.
if curr_index == len(string): break
### DELIMITER HIT ###
if string[curr_index] == start_delimiter:
            # If we hit a delimiter, collect the string before it as an element; flush
if outer_string:
                # Option: .extend(outer_string.strip().split()) | If you want every word separate. Maybe an option?
string_list.append(outer_string.strip())
outer_string = ''
for j in range(curr_index+1, len(string)):
# Stepper that is pushed while in inner delimiter string.
inner_index += 1
                # Once we hit the end delimiter, stop iterating through the inner delimiter string
if string[j] == end_delimiter: break
# String inside delimiters
inner_string += string[j]
# If you want the string inside the delimiters
if not remove:
if keep_delimiter:
inner_string = start_delimiter + inner_string + end_delimiter
string_list.append(inner_string)
# inner delimiter string restart
inner_string = ''
# String outside of the delimiters
else: outer_string += string[curr_index]
# End delimiter is either nested or not the real target; should ignore
if string[curr_index] == end_delimiter:
if string_list and outer_string:
string_list[-1] += outer_string
outer_string = ''
    # In case of not hitting a delimiter at the end of the string, collect the remaining outer delimiter string
    # Option: .extend(outer_string.strip().split()) | If you want every word separate. Maybe an option?
if outer_string: string_list.append(outer_string.strip())
return string_list
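# Added illustrative sketch (not part of the original module): with the default
# remove=True the parenthesised fragment is dropped; with remove=False it is
# kept as its own element, delimiters included.
def _example_string_profiler():
    assert string_profiler('(life love) yes') == ['yes']
    assert string_profiler('(life love) yes', remove=False) == ['(life love)', 'yes']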
pp = pprint.PrettyPrinter(indent=4).pprint
class SetEncoder(json.JSONEncoder):
''' Custom encoder to allow json to convert any sets in nested data to become lists '''
def default(self, obj):
if isinstance(obj, set):
return sorted(list(obj))
return json.JSONEncoder.default(self, obj)
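# Added illustrative sketch (not part of the original module): SetEncoder lets
# json.dumps serialize sets by emitting them as sorted lists.
def _example_set_encoder():
    assert json.dumps({'ids': {2, 1}}, cls=SetEncoder) == '{"ids": [1, 2]}'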
def is_file(path):
if p(path).is_file():
return True
return False
def is_dict(path):
if p(path).is_dir():
return True
return False
def tilda(obj):
if isinstance(obj, list):
return [str(p(o).expanduser()) if isinstance(o, str) else o for o in obj]
elif isinstance(obj, str):
return str(p(obj).expanduser())
else:
return obj
def fix_path(path):
def __fix_path(path):
if not isinstance(path, str):
return path
elif '~' == path[0]:
tilda_fixed_path = tilda(path)
if is_file(tilda_fixed_path):
return tilda_fixed_path
else:
                exit(path + ': does not exist.')
elif is_file(p.home() / path):
return str(p().home() / path)
elif is_dict(p.home() / path):
return str(p().home() / path)
else:
return path
if isinstance(path, str):
return __fix_path(path)
elif isinstance(path, list):
return [__fix_path(p) for p in path]
else:
return path
def compare_strings(s1, s2):
s1, s2 = degrade(s1), degrade(s2)
if s1 != s2:
return False
return True
def mydecoder(string):
try:
string.encode('ascii')
return string
except:
ustring = string.encode('utf-8')
string = re.sub(b"\xe2\x80\x90", b"-", ustring)
return string.decode('utf-8')
def __degrade(sub, var):
def helper(s):
s = str(s)
s = mydecoder(s)
s = re.sub(sub, "", s).lower().strip()
if not s:
return None
return s
if isinstance(var, list):
return [helper(v) if v else v for v in var]
else:
if var:
return helper(var)
else:
return None
def light_degrade(var):
    sub = "\(|\)|'|\""
return __degrade(sub=sub, var=var)
def degrade(var):
    sub = "\(|\)|'|\"|-|,|_|:|\.| |;|#|>|<|`|~|@"
return __degrade(sub=sub, var=var)
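# Added illustrative sketch (not part of the original module): degrade strips
# the punctuation and whitespace in its pattern and lowercases, so these two
# spellings compare as equal.
def _example_compare_strings():
    assert compare_strings('Hello, World', 'hello-world') is True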
def degrade_hash(mylist):
    if not isinstance(mylist, list):
        exit('degrade_hash :: intended for lists only')
    return {v: degrade(v) for v in mylist}
def namecheck(infilename):
if infilename == str(p.home() / 'Dropbox'):
        exit('DONT OVERWRITE THE DROPBOX')
def open_txt(infilename):
namecheck(infilename)
infilename = str(infilename)
if '.txt' not in infilename:
infilename += '.txt'
infilename = fix_path(infilename)
with open(infilename, 'r') as infile:
output = infile.read().strip()
infile.close()
return output
def create_txt(data, output):
namecheck(output)
output = str(output)
if '.txt' not in output:
output += '.txt'
output = fix_path(output)
with open(output, 'w') as outfile:
outfile.write(data)
outfile.close()
def create_json(data, output):
namecheck(output)
output = str(output)
if '.json' not in output:
output += '.json'
output = fix_path(output)
with open(output, 'w') as outfile:
json.dump(data, outfile, indent=4, cls=SetEncoder)
outfile.close()
def open_json(infile):
namecheck(infile)
infile = str(infile)
if '.json' not in infile:
infile += '.json'
infile = fix_path(infile)
with open(infile, 'r') as _infile:
return json.load(_infile)
def create_pickle(data, output):
namecheck(output)
output = str(output)
if '.pickle' not in output:
output += '.pickle'
output = fix_path(output)
with open(output, 'wb') as outfile:
pickle.dump(data, outfile)
outfile.close()
def open_pickle(infile):
namecheck(infile)
infile = str(infile)
if '.pickle' not in infile:
infile += '.pickle'
infile = fix_path(infile)
with open(infile, 'rb') as _infile:
output = pickle.load(_infile)
_infile.close()
return output
def create_csv(rows, infile):
namecheck(infile)
infile = str(infile)
if '.csv' not in infile:
infile += '.csv'
infile = fix_path(infile)
    with open(infile, 'w', newline='') as csvfile:
filewriter = csv.writer(csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL,)
for row in rows:
filewriter.writerow(row)
csvfile.close()
def prettify_ontodiff_json(output):
namecheck(output)
output = str(output) # shell fixes output path itself
if '.json' not in output:
output += '.json'
shellcommand = 'ex -s +\'g/\[[\ \\n]\+"/j4\' -cwq ' + output
if call(shellcommand, shell=True) == 1:
print('Could not prettify the json file')
else:
print('Prettify Complete For:', output)
|
mit
|
procoder317/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
265
|
4081
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
jaidevd/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
49
|
6016
|
"""Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
# Check binary predict decision has also predicted probability above 0.5.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
# Test GPC for multi-class classification problems.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
# Test that multi-class GPC produces identical results with n_jobs>1.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
|
bsd-3-clause
|
rroart/stockstat
|
python/cli/macd.py
|
1
|
10898
|
import talib as ta
import pandas as pd
import pdutils as pdu
import myutils as my
import const
#import myutils as my
doprint = False
class MACD:
def __init__(self, stockdata):
self.count = 0;
self.stockdata = stockdata
def title(self):
return "MACD"
def names(self):
return ["macd", "signal", "diff"]
def getmacd(self, m):
#print("mmm", type(m))
#print(len(m))
#print(type(m[0]))
l = len(m) / 2
# print("hei\n")
# print(l)
# print("\nhei2\n")
# m
c = 1
retlist1 = m[0];
retlist2 = m[1];
retlist3 = m[2];
#for i in range(0, l):
# elem = m[i,]
# first = elem[1]
# second = elem[2]
# if isna(first) and isna(second):
# retlist1[c] = first
# retlist2[c] = second
# retlist3[c] = first - second
# c = c + 1
#print(unlist(retlist1))
#print("")
#print(unlist(retlist2))
#print("")
#print(unlist(retlist3))
return([retlist1, retlist2, retlist3])
def calculate(self, myma):
# print(myma)
# print("bla\n")
#myma = my.fixna(myma)
#print("bbbb")
#print(myma)
#print(len(myma))
l = myma[0]
#llow = myma[1]
#lhigh = myma[2]
#print("len",len(l))
if len(l) < 40:
return(None)
scalebeginning100 = 0
# if scalebeginning100 == 0:
# this does not matter?
# myma = fixpercent(myma)
# print(myma)
maType = 'EMA'
fast = 12
slow = 26
sig = 9
#m = ta.MACD(myma, nFast=fast, nSlow=slow, nSig=sig, maType = maType, percent = False )
#print((myma)
if doprint:
print("l1",l.values)
if not l.isnull().all():
m = ta.MACD(l)
if doprint:
print("hh", m[0].values)
else:
return None
#m = (pd.Series([np.NaN]), pd.Series([np.NaN]), pd.Series([np.NaN]))
#print(type(m))
#print(m.values)
#m = ta.MACD(myma, fast, slow, sig)
#print(type(m))
#print(m)
#if True:
# import time
# time.sleep(15)
# print(m)
# print(m)
# print("\ngrr\n")
lses = self.getmacd(m)
l = len(myma)
#print(myma)
#print(m)
return(lses)
def getmomhist(self, myma, deltadays):
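        # Note added for clarity: the returned list is
        # [macd, signal, hist, d(macd), d(signal), d(hist)] for the most recent
        # sample, where each d(...) is the average per-day change over the
        # preceding deltadays - 1 samples.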
#print(type(myma))
#print(myma.values)
lses = self.calculate(myma)
if lses is None:
print("null")
return(None)
retl = [0, 0, 0, 0, 0, 0]
ls1 = lses[0]
ls2 = lses[1]
ls3 = lses[2]
#print(type(ls1))
doprint = None
if doprint:
print(ls1.values)
print(ls2.values)
print(ls3.values)
#print(type(ls1))
#rint(ls1.keys())
keys1 = ls1.keys()
keys2 = ls2.keys()
keys3 = ls3.keys()
last1 = len(ls1) - 1
last2 = len(ls2) - 1
last3 = len(ls3) - 1
r = keys1[last1]
retl[0] = ls1[keys1[last1]]
retl[1] = ls2[keys2[last2]]
retl[2] = ls3[keys3[last3]]
delta = deltadays - 1
prevs1 = last1 - delta
prevs2 = last2 - delta
prevs3 = last3 - delta
retl[3] = (ls1[keys1[last1]] - ls1[keys1[prevs1]])/delta
retl[4] = (ls2[keys2[last2]] - ls2[keys2[prevs2]])/delta
retl[5] = (ls3[keys3[last3]] - ls3[keys3[prevs3]])/delta
#print(mydf.id)
# if mydf.id == 'VXAZN':
#print('vxazn')
#print(histc.values)
#print(retl)
#print(retl[0])
return(retl)
def dfextend(self, df, period, periodtext, sort, interpolate = True, rebase = False, deltadays = 3, reverse = False, interpolation = 'linear'):
dateset = set(self.stockdata.listdates)
momlist = []
signlist = []
histlist = []
momdlist = []
histdlist = []
sigdlist = []
macd2list = []
sign2list = []
hist2list = []
for mydf in df.itertuples():
#print(type(listid))
el = next(x for x in self.stockdata.listid if x.id.iloc[0] == mydf.id)
#print(type(el))
eldateset = set(el.date.values)
aset = dateset - eldateset
emptydf = pd.DataFrame(data = None, columns = el.columns)
#print("l", len(emptydf))
#print("aset", aset)
for x in aset:
emptydf = emptydf.append({ 'date' : x }, ignore_index=True)
#print("empty0", len(el), len(emptydf), len(aset))
el = el.append(emptydf)
#print("empty", len(el), len(emptydf), len(aset))
#print("eld", len(eld), eld.values)
#for x in stockdata.listdates:
# #print("lll", len(el.date == x))
# if len(el.date == x) == 0:
# print("xxx", x)
#el2 = stockdata.stocks[stockdata.stocks.id == mydf.id]
#print("x", type(el), len(el))
#print(type(el2), len(el2))
el = el.sort_values(by='date', ascending = 1)
myc = pdu.getonedfvalue(el, period)
dateslen = len(self.stockdata.listdates)
myclen = len(myc)
mycoffset = dateslen - myclen
mycorig = myc
#print(type(myc))
#print(myclen, headskipmacd)
#print(type(headskipmacd))
#myc = myc.head(n=(myclen-headskipmacd))
#myc = myc.tail(n=macddays)
myc = myc.iloc[0 : self.stockdata.dates.startindex + 1 - mycoffset]
#print(type(myc))
if rebase:
if periodtext == "Price" or periodtext == "Index":
#print("myc")
#print(type(myc))
#print(myc.values[0])
first = myc.values[0]
#print(first)
#print(100/first)
#print(myc.values)
myc = myc * (100 / first)
#print(myc.values)
#print("myc2")
#print(mydf.id)
global doprint
doprint = mydf.id == '1301162' or mydf.id == '3SUR'
#doprint = doprint
#rsi.doprint = doprint
if doprint:
print(type(myc))
print(myc.values)
print(mycorig.values)
print(len(myc.values))
print(len(self.stockdata.listdates))
print(myclen)
print("iloc",self.stockdata.dates.startindex,self.stockdata.dates.endindex)
print("iloc",self.stockdata.listdates[self.stockdata.dates.startindex],self.stockdata.listdates[self.stockdata.dates.endindex])
#if myclen != len(stockdata.listdates):
# print("error", len(stockdata.listdates),myclen)
#else:
# print("ok")
if periodtext == "Price" or periodtext == "Index":
myc = my.fixzero2(myc)
if interpolate:
myc = my.fixna(myc, interpolation)
#myc = myc.interpolate(method='linear')
momhist = self.getmomhist([myc, None, None], deltadays)
#print(type(momhist))
#print(len(momhist))
#print(momhist.keys())
#print(type(momhist))
#print(len(momhist))
#print(mom)
if doprint:
print("monh", momhist)
l = pdu.listperiod2(mydf, period)
print(l)
if not momhist is None:
l = pdu.listperiod2(mydf, period)
momlist.append(momhist[0])
signlist.append(momhist[1])
histlist.append(momhist[2])
momdlist.append(momhist[3])
sigdlist.append(momhist[4])
histdlist.append(momhist[5])
macd2list.append(momhist[0]/l)
sign2list.append(momhist[1]/l)
hist2list.append(momhist[2]/l)
else:
momlist.append(None)
signlist.append(None)
histlist.append(None)
momdlist.append(None)
sigdlist.append(None)
histdlist.append(None)
macd2list.append(None)
sign2list.append(None)
hist2list.append(None)
#headskipmacd = headskipmacd + tablemoveintervaldays
momc = momlist
signc = signlist
histc = histlist
#momdc = momdlist
#histdc = histdlist
#print('momlist')
#print(momlist)
#print(type(momlist))
df['momc'] = pd.Series(data = momlist, name = 'momc', index = df.index)
df['histc'] = pd.Series(data = histlist, name = 'histc', index = df.index)
df['signc'] = pd.Series(data = signlist, name = 'signc', index = df.index)
df['momdc'] = pd.Series(data = momdlist, name = 'momdc', index = df.index)
df['sigdc'] = pd.Series(data = sigdlist, name = 'sigdc', index = df.index)
df['histdc'] = pd.Series(data = histdlist, name = 'histdc', index = df.index)
df['macd2'] = pd.Series(data = macd2list, name = 'macd2', index = df.index)
df['sign2'] = pd.Series(data = sign2list, name = 'sign2', index = df.index)
df['hist2'] = pd.Series(data = hist2list, name = 'hist2', index = df.index)
#print(df.name)
#print(df.momc)
if sort == const.HIST:
if reverse:
df = df.sort_values(by='histc', ascending = 0)
else:
df = df.sort_values(by='histc', ascending = 1)
if sort == const.MACD:
if reverse:
df = df.sort_values(by='momc', ascending = 0)
else:
df = df.sort_values(by='momc', ascending = 1)
if sort == const.SIGN:
if reverse:
df = df.sort_values(by='signc', ascending = 0)
else:
df = df.sort_values(by='signc', ascending = 1)
if sort == const.MACD2:
if reverse:
df = df.sort_values(by='macd2', ascending = 0)
else:
df = df.sort_values(by='macd2', ascending = 1)
if sort == const.SIGN2:
if reverse:
df = df.sort_values(by='sign2', ascending = 0)
else:
df = df.sort_values(by='sign2', ascending = 1)
if sort == const.HIST2:
if reverse:
df = df.sort_values(by='hist2', ascending = 0)
else:
df = df.sort_values(by='hist2', ascending = 1)
return df
def titles(self):
return [ "macd", "sign", "hist", "macdd", "signd", "histd", "macdb", "signb", "histb" ]
def values(self, df, i):
return [ df.momc.iloc[i],
df.signc.iloc[i],
df.histc.iloc[i],
df.momdc.iloc[i],
df.sigdc.iloc[i],
df.histdc.iloc[i],
df.macd2.iloc[i],
df.sign2.iloc[i],
df.hist2.iloc[i] ]
def formats(self):
return [ "{:.2f}", "{:.2f}", "{:.2f}", "{:.4f}", "{:.4f}", "{:.4f}", "{:.4f}", "{:.4f}", "{:.4f}" ]
|
agpl-3.0
|
architecture-building-systems/CEAforArcGIS
|
cea/technologies/network_layout/connectivity_potential.py
|
2
|
17538
|
"""
This script uses libraries in shapely to create connections from
a series of points (buildings) to the closest street
"""
import os
from geopandas import GeoDataFrame as gdf
from shapely.geometry import Point, LineString, MultiPoint, box
from shapely.ops import split, linemerge, snap
import cea.config
import cea.inputlocator
from cea.constants import SHAPEFILE_TOLERANCE, SNAP_TOLERANCE
from cea.utilities.standardize_coordinates import get_projected_coordinate_system, get_geographic_coordinate_system
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Malcolm Kesson", "Mattijn"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def compute_intersections(lines, crs):
import itertools
inters = []
for line1, line2 in itertools.combinations(lines, 2):
if line1.intersects(line2):
inter = line1.intersection(line2)
if "Point" == inter.type:
inters.append(inter)
elif "MultiPoint" == inter.type:
inters.extend([pt for pt in inter])
elif "MultiLineString" == inter.type:
multiLine = [line for line in inter]
first_coords = multiLine[0].coords[0]
last_coords = multiLine[len(multiLine) - 1].coords[1]
inters.append(Point(first_coords[0], first_coords[1]))
inters.append(Point(last_coords[0], last_coords[1]))
elif "GeometryCollection" == inter.type:
for geom in inter:
if "Point" == geom.type:
inters.append(geom)
elif "MultiPoint" == geom.type:
inters.extend([pt for pt in geom])
elif "MultiLineString" == geom.type:
multiLine = [line for line in geom]
first_coords = multiLine[0].coords[0]
last_coords = multiLine[len(multiLine) - 1].coords[1]
inters.append(Point(first_coords[0], first_coords[1]))
inters.append(Point(last_coords[0], last_coords[1]))
geometry = inters
df = gdf(geometry=geometry, crs=crs)
return df
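# Added illustrative sketch (toy geometry, crs left as None; not part of the
# original module): two crossing segments yield one intersection point, (1, 1).
def _example_compute_intersections():
    lines = [LineString([(0, 0), (2, 2)]), LineString([(0, 2), (2, 0)])]
    df = compute_intersections(lines, crs=None)
    assert [(pt.x, pt.y) for pt in df.geometry] == [(1.0, 1.0)]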
def computer_end_points(lines, crs):
all_points = []
for line in lines:
for i in [0, -1]: # start and end point
all_points.append(line.coords[i])
unique_points = set(all_points)
endpts = [Point(p) for p in unique_points]
df = gdf(geometry=endpts, crs=crs)
return df
def nearest_neighbor_within(others, point, max_distance):
"""Find nearest point among others up to a maximum distance.
Args:
others: a list of Points or a MultiPoint
point: a Point
max_distance: maximum distance to search for the nearest neighbor
Returns:
A shapely Point if one is within max_distance, None otherwise
"""
search_region = point.buffer(max_distance)
interesting_points = search_region.intersection(MultiPoint(others))
if not interesting_points:
closest_point = None
elif isinstance(interesting_points, Point):
closest_point = interesting_points
else:
distances = [point.distance(ip) for ip in interesting_points
if point.distance(ip) > 0]
closest_point = interesting_points[distances.index(min(distances))]
return closest_point
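# Added illustrative sketch (toy coordinates; not part of the original module):
# only a candidate within max_distance of the query point is returned,
# otherwise the result is None.
def _example_nearest_neighbor_within():
    candidates = [Point(0, 0), Point(3, 0), Point(10, 0)]
    query = Point(2.5, 0)
    assert nearest_neighbor_within(candidates, query, max_distance=2.0).equals(Point(3, 0))
    assert nearest_neighbor_within(candidates, query, max_distance=0.1) is None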
def find_isolated_endpoints(lines):
"""Find endpoints of lines that don't touch another line.
Args:
lines: a list of LineStrings or a MultiLineString
Returns:
A list of line end Points that don't touch any other line of lines
"""
isolated_endpoints = []
for i, line in enumerate(lines):
other_lines = lines[:i] + lines[i + 1:]
for q in [0, -1]:
endpoint = Point(line.coords[q])
if any(endpoint.touches(another_line)
for another_line in other_lines):
continue
else:
isolated_endpoints.append(endpoint)
return isolated_endpoints
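# Added illustrative sketch (toy geometry; not part of the original module):
# the two segments touch at (1, 0), so only the outer endpoints are isolated.
def _example_find_isolated_endpoints():
    lines = [LineString([(0, 0), (1, 0)]), LineString([(1, 0), (2, 0)])]
    ends = find_isolated_endpoints(lines)
    assert {(pt.x, pt.y) for pt in ends} == {(0.0, 0.0), (2.0, 0.0)}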
def bend_towards(line, where, to):
"""Move the point where along a line to the point at location to.
Args:
line: a LineString
where: a point ON the line (not necessarily a vertex)
to: a point NOT on the line where the nearest vertex will be moved to
Returns:
the modified (bent) line
"""
if not line.contains(where) and not line.touches(where):
raise ValueError('line does not contain the point where.')
coords = line.coords[:]
# easy case: where is (within numeric precision) a vertex of line
for k, vertex in enumerate(coords):
if where.almost_equals(Point(vertex)):
# move coordinates of the vertex to destination
coords[k] = to.coords[0]
return LineString(coords)
# hard case: where lies between vertices of line, so
# find nearest vertex and move that one to point to
_, min_k = min((where.distance(Point(vertex)), k)
for k, vertex in enumerate(coords))
coords[min_k] = to.coords[0]
return LineString(coords)
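# Added illustrative sketch (toy geometry; not part of the original module):
# the vertex of the line nearest to `where` is moved to `to`, bending a
# straight line at its middle vertex.
def _example_bend_towards():
    line = LineString([(0, 0), (1, 0), (2, 0)])
    bent = bend_towards(line, where=Point(1, 0), to=Point(1, 1))
    assert list(bent.coords) == [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)]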
def vertices_from_lines(lines):
"""Return list of unique vertices from list of LineStrings."""
vertices = []
for line in lines:
vertices.extend(list(line.coords))
return [Point(p) for p in set(vertices)]
def snappy_endings(lines, max_distance, crs):
    """Snap endpoints of lines together if they are at most max_distance apart.
Args:
lines: a list of LineStrings or a MultiLineString
max_distance: maximum distance two endpoints may be joined together
"""
# initialize snapped lines with list of original lines
# snapping points is a MultiPoint object of all vertices
snapped_lines = [line for line in lines]
snapping_points = vertices_from_lines(snapped_lines)
# isolated endpoints are going to snap to the closest vertex
isolated_endpoints = find_isolated_endpoints(snapped_lines)
# only move isolated endpoints, one by one
for endpoint in isolated_endpoints:
# find all vertices within a radius of max_distance as possible
target = nearest_neighbor_within(snapping_points, endpoint,
max_distance)
# do nothing if no target point to snap to is found
if not target:
continue
# find the LineString to modify within snapped_lines and update it
for i, snapped_line in enumerate(snapped_lines):
if endpoint.touches(snapped_line):
snapped_lines[i] = bend_towards(snapped_line, where=endpoint,
to=target)
break
# also update the corresponding snapping_points
for i, snapping_point in enumerate(snapping_points):
if endpoint.equals(snapping_point):
snapping_points[i] = target
break
# post-processing: remove any resulting lines of length 0
snapped_lines = [s for s in snapped_lines if s.length > 0]
df = gdf(geometry=snapped_lines, crs=crs)
return df
def split_line_by_nearest_points(gdf_line, gdf_points, tolerance_grid_snap, crs):
"""
    Split the union of lines at the union of points and return the resulting segments
:param GeoDataFrame gdf_line: GeoDataFrame with multiple rows of connecting line segments
:param GeoDataFrame gdf_points: geodataframe with multiple rows of single points
:returns: ``gdf_segments`` (GeoDataFrame of segments)
:rtype: GeoDataFrame
https://github.com/ojdo/python-tools/blob/master/shapelytools.py#L144
"""
# union all geometries
line = gdf_line.geometry.unary_union
line._crs = crs
snap_points = gdf_points.geometry.unary_union
snap_points._crs = crs
# snap and split coords on line
# returns GeometryCollection
# snap_points = snap(coords, line, tolerance)
# snap_points._crs = crs
split_line = split(line, snap(snap_points, line, tolerance_grid_snap))
split_line._crs = crs
segments = [feature for feature in split_line if feature.length > 0.01]
gdf_segments = gdf(geometry=segments, crs=crs)
# gdf_segments.columns = ['index', 'geometry']
return gdf_segments
def nearest_neighbor_within(others, point, max_distance):
"""Find nearest point among others up to a maximum distance.
Args:
others: a list of Points or a MultiPoint
point: a Point
max_distance: maximum distance to search for the nearest neighbor
Returns:
A shapely Point if one is within max_distance, None otherwise
"""
search_region = point.buffer(max_distance)
interesting_points = search_region.intersection(MultiPoint(others))
if not interesting_points:
closest_point = None
elif isinstance(interesting_points, Point):
closest_point = interesting_points
else:
distances = [point.distance(ip) for ip in interesting_points
if point.distance(ip) > 0]
closest_point = interesting_points[distances.index(min(distances))]
return closest_point
def near_analysis(buiding_centroids, street_network, crs):
near_point = []
building_name = []
for point, name in zip(buiding_centroids.geometry, buiding_centroids.Name):
point._crs = crs
distance = 10e10
for line in street_network.geometry:
line._crs = crs
nearest_point_candidate = line.interpolate(line.project(point))
distance_candidate = point.distance(nearest_point_candidate)
if distance_candidate < distance:
distance = distance_candidate
nearest_point = nearest_point_candidate
building_name.append(name)
near_point.append(nearest_point)
geometry = near_point
df = gdf(geometry=geometry, crs=crs)
df["Name"] = building_name
return df
def snap_points(points, lines, tolerance, crs):
length = lines.shape[0]
for i in range(length):
for point in points.geometry:
line = lines.loc[i, "geometry"]
line._crs = crs
point._crs = crs
point_inline_projection = line.interpolate(line.project(point))
point_inline_projection._crs = crs
distance_to_line = point.distance(point_inline_projection)
if (point.x, point.y) in line.coords:
                pass  # the point already coincides with a vertex of this line
else:
if distance_to_line < tolerance:
buff = point.buffer(0.1)
### Split the line on the buffer
geometry = split(line, buff)
geometry._crs = crs
line_1_points = [tuple(xy) for xy in geometry[0].coords[:-1]]
line_1_points.append((point.x, point.y))
line_2_points = []
line_2_points.append((point.x, point.y))
line_2_points.extend([x for x in geometry[-1].coords[1:]])
### Stitch together the first segment, the interpolated point, and the last segment
new_line = linemerge((LineString(line_1_points), LineString(line_2_points)))
lines.loc[i, "geometry"] = new_line
G = points["geometry"].apply(lambda geom: geom.wkb)
points = points.loc[G.drop_duplicates().index]
G = lines["geometry"].apply(lambda geom: geom.wkb)
lines = lines.loc[G.drop_duplicates().index]
return points, lines
def one_linestring_per_intersection(lines, crs):
""" Move line endpoints to intersections of line segments.
Given a list of touching or possibly intersecting LineStrings, return a
    list of LineStrings that have their endpoints at all crossings and
intersecting points and ONLY there.
Args:
a list of LineStrings or a MultiLineString
Returns:
a list of LineStrings
"""
lines_merged = linemerge(lines)
# intersecting multiline with its bounding box somehow triggers a first intersection
try:
bounding_box = box(*lines_merged.bounds)
lines_merged = lines_merged.intersection(bounding_box)
except:
        # if the bounding_box step fails, fall back to the plain linemerge result.
        print('bounding box method did not work, falling back to a simpler method, no need to worry')
# merge the result
lines_merged = linemerge(lines_merged)
lines = [line for line in lines_merged]
df = gdf(geometry=lines, crs=crs)
return df
def calculate_end_points_intersections(prototype_network, crs):
# compute endpoints of the new prototype network
gdf_points = computer_end_points(prototype_network.geometry, crs)
# compute intersections
gdf_intersections = compute_intersections(prototype_network.geometry, crs)
gdf_points_snapped = gdf_points.append(gdf_intersections).reset_index(drop=True)
G = gdf_points_snapped["geometry"].apply(lambda geom: geom.wkb)
gdf_points_snapped = gdf_points_snapped.loc[G.drop_duplicates().index]
return gdf_points_snapped
def create_terminals(buiding_centroids, crs, street_network):
# get list of nearest points
near_points = near_analysis(buiding_centroids, street_network, crs)
    # extend to the building centroids
all_points = near_points.append(buiding_centroids)
all_points.crs = crs
# Aggregate these points with the GroupBy
lines_to_buildings = all_points.groupby(['Name'])['geometry'].apply(lambda x: LineString(x.tolist()))
lines_to_buildings = gdf(lines_to_buildings, geometry='geometry', crs=crs)
lines_to_buildings = lines_to_buildings.append(street_network).reset_index(drop=True)
lines_to_buildings.crs = crs
return lines_to_buildings
def simplify_liness_accurracy(lines, decimals, crs):
new_lines = []
for line in lines:
points_of_line = []
for point in line.coords:
x = round(point[0], decimals)
y = round(point[1], decimals)
points_of_line.append((x, y))
new_lines.append(LineString(points_of_line))
df = gdf(geometry=new_lines, crs=crs)
return df
def calc_connectivity_network(path_streets_shp, building_centroids_df, temp_path_potential_network_shp):
"""
This script outputs a potential network connecting a series of building points to the closest street network
the street network is assumed to be a good path to the district heating or cooling network
:param path_streets_shp: path to street shapefile
:param building_centroids_df: path to substations in buildings (or close by)
    :param temp_path_potential_network_shp: output path for the potential network shapefile
:return:
"""
# first get the street network
street_network = gdf.from_file(path_streets_shp)
# check coordinate system
street_network = street_network.to_crs(get_geographic_coordinate_system())
lon = street_network.geometry[0].centroid.coords.xy[0][0]
lat = street_network.geometry[0].centroid.coords.xy[1][0]
street_network = street_network.to_crs(get_projected_coordinate_system(lat, lon))
crs = street_network.crs
street_network = simplify_liness_accurracy(street_network.geometry.values, SHAPEFILE_TOLERANCE, crs)
# create terminals/branches form street to buildings
prototype_network = create_terminals(building_centroids_df, crs, street_network)
config = cea.config.Configuration()
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
# first split in intersections
prototype_network = one_linestring_per_intersection(prototype_network.geometry.values,
crs)
# snap endings of all vectors to ending of all other vectors
prototype_network = snappy_endings(prototype_network.geometry.values, SNAP_TOLERANCE, crs)
# calculate intersections
gdf_points_snapped = calculate_end_points_intersections(prototype_network, crs)
# snap these points to the lines and transform lines
gdf_points_snapped, prototype_network = snap_points(gdf_points_snapped, prototype_network, SNAP_TOLERANCE, crs)
# save for verification purposes
prototype_network.to_file(locator.get_temporary_file("prototype_network.shp"), driver='ESRI Shapefile')
# get segments
potential_network_df = split_line_by_nearest_points(prototype_network, gdf_points_snapped, 1.0, crs)
# calculate Shape_len field
potential_network_df["Shape_Leng"] = potential_network_df["geometry"].apply(lambda x: x.length)
potential_network_df.to_file(temp_path_potential_network_shp, driver='ESRI Shapefile')
return crs
def main(config):
assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
path_streets_shp = locator.get_street_network() # shapefile with the stations
path_connection_point_buildings_shp = locator.get_temporary_file(
"nodes_buildings.shp") # substation, it can be the centroid of the building
path_potential_network = locator.get_temporary_file("potential_network.shp") # shapefile, location of output.
calc_connectivity_network(path_streets_shp, path_connection_point_buildings_shp,
path_potential_network)
if __name__ == '__main__':
main(cea.config.Configuration())
|
mit
|
neishm/EC-CAS-diags
|
eccas_diags/diagnostics/TimeSeriesRBP.py
|
1
|
5591
|
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# regional bar plots
from .timeseries import Timeseries
class TimeseriesRBP(Timeseries):
"""
Bin data into regions (country or zone), and produce a histogram of the
result.
"""
def __init__ (self, ymin=350, ymax=420, **kwargs):
super(TimeseriesRBP,self).__init__(**kwargs)
self.ymin = ymin
self.ymax = ymax
# Only use very particular obs datasets, where we know the regions.
def _input_combos (self, inputs):
for inputs in super(TimeseriesRBP,self)._input_combos(inputs):
if inputs[-1].name == 'GAW-2014-hourly':
yield inputs
def do (self, inputs):
import numpy as np
import matplotlib.pyplot as pl
from os.path import exists
#Format image directory
outdir = self.outdir + '/TimeSeriesRBP-images_%s_%s%s'%('_'.join(d.name for d in inputs),self.fieldname,self.suffix)
if not exists(outdir):
from os import makedirs
makedirs(outdir)
outfile = "%s/%s_timeseries_%s%s.%s"%(outdir,'_'.join(d.name for d in inputs),self.fieldname,self.suffix+self.end_suffix,self.image_format)
# Skip plots that have already been generated.
if exists(outfile): return
fig = pl.figure(figsize=(8,8))
Zones = np.zeros((5,len(inputs)))
Stds = np.zeros((5,len(inputs)))
#List of stations within the two continents in question
NorthAmList = ['Alert','Barrow','Candle Lake','Chibougamau','East Trout Lake','Egbert','Estevan Point','Fraserdale',
'Lac La Biche (Alberta)','Mauna Loa','Sable Island',]
EuropeList = ['BEO Moussala','Jungfraujoch','Kollumerwaard','Monte Cimone','Neuglobsow','Pallas-Sammaltunturi','Plateau Rosa',
'Puszcza Borecka/Diabla Gora','Schauinsland','Sonnblick','Westerland','Zeppelinfjellet (Ny-Alesund)','Zugspitze / Schneefernerhaus']
#List for counting the number of stations in each group
Count = np.zeros((5,len(inputs)),dtype=int)
nstations = len(inputs[0].datasets)
for j in range(nstations):
station_axis = inputs[0].datasets[j].vars[0].station
assert len(station_axis) == 1, "Unable to handle multi-station datasets"
location = station_axis.station[0]
station_info = station_axis(station=location)
lat = station_info.lat[0]
lon = station_info.lon[0]
#-----Record Data------
for i,inp in enumerate(inputs):
d = inp.datasets[j][self.fieldname].get().flatten()
mean = np.mean(d[~np.isnan(d)])
std = np.std(d[~np.isnan(d)])
if np.isnan(mean): continue # Check if there's any data to include
#Average values and standard deviations of each station's timeseries
if lat > 30:
#Add 1 to the region count on first run through
Zones[0,i] += mean
Stds[0,i] += std
Count[0,i] += 1
elif lat < -30:
Zones[1,i] += mean
Stds[1,i] += std
Count[1,i] += 1
else:
Zones[2,i] += mean
Stds[2,i] += std
Count[2,i] += 1
#Sort for Europe and NA stations
if location in NorthAmList:
Zones[3,i] += mean
Stds[3,i] += std
Count[3,i] += 1
elif location in EuropeList:
Zones[4,i] += mean
Stds[4,i] += std
Count[4,i] += 1
#---------Process/Plot Data--------
#Average the values of each Zone's dataset's station average
Zones /= Count
Stds /= Count
rects = []
for i in range(len(inputs)):
xvalues = np.arange(5)*(len(inputs)+1)+i
height = list(Zones[:,i])
yerr = list(Stds[:,i])
rect = pl.bar(xvalues,height,yerr = yerr,color = inputs[i].color, width = 1,lw = 2, ecolor= 'black',capsize = 5)
rects.append(rect)
for i in range(len(Zones)):
for j in range(len(inputs)):
textx = i*(len(inputs)+1)+j+0.5
texty = (Zones[i,j]+self.ymin)/2.0
if np.isfinite(texty):
pl.text(textx,texty,Count[i,j],horizontalalignment = 'center', color='white')
pl.xlim(-1,len(Zones)*(len(inputs)+1))
pl.title('Average %s Concentrations'%(self.fieldname))
pl.ylim(ymin=self.ymin,ymax=self.ymax)
pl.ylabel('%s (%s)'%(self.fieldname,self.units))
pl.xticks(np.arange(5)*(len(inputs)+1)+len(inputs)/2.0,
['Northern\nHemisphere','Southern\nHemisphere','Tropics','North\nAmerica','Europe']
,horizontalalignment = 'center')
pl.legend(rects, [d.title for d in inputs],prop={'size':12})
pl.text(.02,.96,'One standard deviation shown',transform = pl.gca().transAxes)
fig.savefig(outfile)
pl.close(fig)
from . import table
table['regional-bargraph'] = TimeseriesRBP
|
lgpl-3.0
|
manashmndl/scikit-learn
|
sklearn/utils/validation.py
|
66
|
23629
|
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
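# Added illustrative sketch (toy array; not part of the original module):
# integer input is upcast to a floating point dtype.
def _example_as_float_array():
    out = as_float_array(np.array([1, 2, 3]))
    assert out.dtype in FLOAT_DTYPES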
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
    """Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
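# Added illustrative sketch (toy inputs; not part of the original module):
# equal-length inputs pass through indexable, while a length mismatch raises
# ValueError via check_consistent_length above.
def _example_indexable():
    X, y = indexable([[0], [1], [2]], np.array([0, 1, 0]))
    assert len(X) == len(y) == 3
    try:
        indexable([[0], [1], [2]], np.array([0, 1]))
    except ValueError:
        pass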
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be Fortran or C-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
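# Usage sketch (illustrative helper): check_array promotes nested lists to a
# 2D float array and rejects inputs with fewer than ensure_min_samples rows.
def _example_check_array():
    X = check_array([[1, 2], [3, 4]], dtype=np.float64)
    assert X.ndim == 2 and X.dtype == np.float64
    try:
        check_array(np.empty((0, 3)))        # 0 samples < ensure_min_samples=1
    except ValueError:
        pass
    return X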
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be Fortran or C-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
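# Usage sketch (illustrative helper): check_X_y validates X as 2D and ravels a
# column-vector y (emitting a DataConversionWarning) before length checking.
def _example_check_X_y():
    X = np.arange(6).reshape(3, 2)
    y = np.array([[0], [1], [0]])            # column vector, will be ravelled
    X_checked, y_checked = check_X_y(X, y)
    assert X_checked.shape == (3, 2) and y_checked.shape == (3,)
    return X_checked, y_checked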
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
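# Usage sketch (illustrative helper): the three accepted seed types are
# normalized to a RandomState; an existing instance is returned unchanged.
def _example_check_random_state():
    rng_global = check_random_state(None)     # np.random singleton
    rng_seeded = check_random_state(42)       # fresh RandomState(42)
    assert check_random_state(rng_seeded) is rng_seeded
    return rng_global.rand(2), rng_seeded.rand(2)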
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
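# Usage sketch (illustrative helper): a slightly asymmetric dense matrix is
# symmetrized by averaging with its transpose.
def _example_check_symmetric():
    A = np.array([[1.0, 2.0], [2.0 + 1e-3, 1.0]])
    A_sym = check_symmetric(A, raise_warning=False)
    assert np.allclose(A_sym, A_sym.T)
    return A_sym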
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
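# Usage sketch (illustrative, hypothetical estimator): check_is_fitted raises
# NotFittedError until the named attribute exists on the estimator.
class _ExampleEstimator(object):
    def fit(self, X, y=None):
        self.coef_ = np.zeros(X.shape[1])
        return self
def _example_check_is_fitted():
    est = _ExampleEstimator()
    try:
        check_is_fitted(est, 'coef_')
    except NotFittedError:
        pass                                   # not fitted yet
    est.fit(np.ones((2, 3)))
    check_is_fitted(est, 'coef_')              # passes after fit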
|
bsd-3-clause
|
tbabej/astropy
|
astropy/visualization/wcsaxes/tests/test_transform_coord_meta.py
|
2
|
5095
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from .... import units as u
from ....wcs import WCS
from ....tests.helper import pytest, remote_data
from .. import WCSAxes
from .test_images import BaseImageTests
from ..transforms import CurvedTransform
from ....tests.image_tests import IMAGE_REFERENCE_DIR
# Create fake transforms that roughly mimic a polar projection
class DistanceToLonLat(CurvedTransform):
def __init__(self, R=6e3):
super(DistanceToLonLat, self).__init__()
self.R = R
def transform(self, xy):
x, y = xy[:, 0], xy[:, 1]
lam = np.degrees(np.arctan2(y, x))
phi = 90. - np.degrees(np.hypot(x, y) / self.R)
return np.array((lam, phi)).transpose()
transform_non_affine = transform
def inverted(self):
return LonLatToDistance(R=self.R)
class LonLatToDistance(CurvedTransform):
def __init__(self, R=6e3):
super(LonLatToDistance, self).__init__()
self.R = R
def transform(self, lamphi):
lam, phi = lamphi[:, 0], lamphi[:, 1]
r = np.radians(90 - phi) * self.R
x = r * np.cos(np.radians(lam))
y = r * np.sin(np.radians(lam))
return np.array((x, y)).transpose()
transform_non_affine = transform
def inverted(self):
return DistanceToLonLat(R=self.R)
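# Sanity-check sketch (hypothetical helper, not exercised by the image tests):
# the two fake transforms should round-trip coordinates up to floating point.
def _check_fake_transform_roundtrip():
    xy = np.array([[100.0, 200.0], [-50.0, 75.0]])
    t = DistanceToLonLat(R=6e3)
    xy_back = t.inverted().transform(t.transform(xy))
    assert np.allclose(xy, xy_back)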
class TestTransformCoordMeta(BaseImageTests):
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='coords_overlay.png', tolerance=1.5)
def test_coords_overlay(self):
# Set up a simple WCS that maps pixels to non-projected distances
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['x', 'y']
wcs.wcs.cunit = ['km', 'km']
wcs.wcs.crpix = [614.5, 856.5]
wcs.wcs.cdelt = [6.25, 6.25]
wcs.wcs.crval = [0., 0.]
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs)
fig.add_axes(ax)
s = DistanceToLonLat(R=6378.273)
ax.coords['x'].set_ticklabel_position('')
ax.coords['y'].set_ticklabel_position('')
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (360., None)
coord_meta['unit'] = (u.deg, u.deg)
coord_meta['name'] = 'lon', 'lat'
overlay = ax.get_coords_overlay(s, coord_meta=coord_meta)
overlay.grid(color='red')
overlay['lon'].grid(color='red', linestyle='solid', alpha=0.3)
overlay['lat'].grid(color='blue', linestyle='solid', alpha=0.3)
overlay['lon'].set_ticklabel(size=7)
overlay['lat'].set_ticklabel(size=7)
overlay['lon'].set_ticklabel_position('brtl')
overlay['lat'].set_ticklabel_position('brtl')
overlay['lon'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True)
overlay['lat'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True)
ax.set_xlim(-0.5, 1215.5)
ax.set_ylim(-0.5, 1791.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='coords_overlay_auto_coord_meta.png', tolerance=1.5)
def test_coords_overlay_auto_coord_meta(self):
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=WCS(self.msx_header))
fig.add_axes(ax)
ax.grid(color='red', alpha=0.5, linestyle='solid')
overlay = ax.get_coords_overlay('fk5') # automatically sets coord_meta
overlay.grid(color='black', alpha=0.5, linestyle='solid')
overlay['ra'].set_ticks(color='black')
overlay['dec'].set_ticks(color='black')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
@remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR, filename='direct_init.png', tolerance=1.5)
def test_direct_init(self):
s = DistanceToLonLat(R=6378.273)
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (360., None)
coord_meta['unit'] = (u.deg, u.deg)
coord_meta['name'] = 'lon', 'lat'
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], transform=s, coord_meta=coord_meta)
fig.add_axes(ax)
ax.coords['lon'].grid(color='red', linestyle='solid', alpha=0.3)
ax.coords['lat'].grid(color='blue', linestyle='solid', alpha=0.3)
ax.coords['lon'].set_ticklabel(size=7)
ax.coords['lat'].set_ticklabel(size=7)
ax.coords['lon'].set_ticklabel_position('brtl')
ax.coords['lat'].set_ticklabel_position('brtl')
ax.coords['lon'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True)
ax.coords['lat'].set_ticks(spacing=10. * u.deg, exclude_overlapping=True)
ax.set_xlim(-400., 500.)
ax.set_ylim(-300., 400.)
return fig
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
examples/linear_model/plot_logistic_path.py
|
349
|
1195
|
#!/usr/bin/env python
"""
=================================
Path with L1-Logistic Regression
=================================
Computes the regularization path on the IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
msmbuilder/msmbuilder
|
msmbuilder/tests/test_estimator_subclassing.py
|
10
|
1499
|
from __future__ import print_function, absolute_import, division
import importlib
import inspect
import pkgutil
import warnings
from contextlib import contextmanager
from sklearn.base import BaseEstimator
import msmbuilder
import msmbuilder.base
def silent_warnings(*args, **kwargs):
print(args, kwargs)
@contextmanager
def supress_warnings():
original_warn = warnings.warn
warnings.warn = silent_warnings
yield
warnings.warn = original_warn
def import_all_estimators(pkg):
def estimator_in_module(mod):
for name, obj in inspect.getmembers(mod):
if name.startswith('_'):
continue
if inspect.isclass(obj) and issubclass(obj, BaseEstimator):
yield obj
with supress_warnings():
result = {}
for _, modname, ispkg in pkgutil.iter_modules(pkg.__path__):
c = '%s.%s' % (pkg.__name__, modname)
try:
mod = importlib.import_module(c)
if ispkg:
result.update(import_all_estimators(mod))
for kls in estimator_in_module(mod):
result[kls.__name__] = kls
except ImportError as e:
print('e', e)
continue
return result
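# Usage sketch (hypothetical helper): list the class names of every estimator
# that import_all_estimators discovers under the msmbuilder package.
def _example_list_estimators():
    return sorted(import_all_estimators(msmbuilder).keys())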
def test_all_estimators():
for key, value in import_all_estimators(msmbuilder).items():
if 'msmbuilder' in value.__module__:
assert issubclass(value, msmbuilder.base.BaseEstimator), value
|
lgpl-2.1
|
rubennj/pvlib-python
|
setup.py
|
2
|
2294
|
#!/usr/bin/env python
import os
import re
import shutil
import sys
try:
from setuptools import setup, Command
from setuptools.extension import Extension
except ImportError:
raise RuntimeError('setuptools is required')
DESCRIPTION = 'The PVLIB toolbox provides a set of functions for simulating the performance of photovoltaic energy systems.'
LONG_DESCRIPTION = open('README.md').read()
DISTNAME = 'pvlib'
LICENSE = 'The BSD 3-Clause License'
AUTHOR = 'Dan Riley, Clifford Hanson, Rob Andrews, Will Holmgren, github contributors'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/pvlib/pvlib-python'
# imports __version__ into the local namespace
version_file = os.path.join(os.path.dirname(__file__), 'pvlib/version.py')
with open(version_file, 'r') as f:
exec(f.read())
# check python version.
if not sys.version_info[:2] in ((2,7), (3,3), (3,4)):
sys.exit('%s requires Python 2.7, 3.3, or 3.4' % DISTNAME)
setuptools_kwargs = {
'zip_safe': False,
'install_requires': ['numpy >= 1.7.0',
'pandas >= 0.13.1',
'pytz',
'six',
],
'scripts': [],
'include_package_data': True
}
# set up pvlib packages to be installed and extensions to be compiled
PACKAGES = ['pvlib']
extensions = []
spa_sources = ['pvlib/spa_c_files/spa.c', 'pvlib/spa_c_files/spa_py.c']
spa_depends = ['pvlib/spa_c_files/spa.h']
spa_all_file_paths = map(lambda x: os.path.join(os.path.dirname(__file__), x),
spa_sources + spa_depends)
if all(map(os.path.exists, spa_all_file_paths)):
print('all spa_c files found')
PACKAGES.append('pvlib.spa_c_files')
spa_ext = Extension('pvlib.spa_c_files.spa_py',
sources=spa_sources, depends=spa_depends)
extensions.append(spa_ext)
else:
print('WARNING: spa_c files not detected. ' +
'See installation instructions for more information.')
setup(name=DISTNAME,
version=__version__,
packages=PACKAGES,
ext_modules=extensions,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
**setuptools_kwargs)
|
bsd-3-clause
|
wright-group/WrightTools
|
tests/artists/test_quick2D.py
|
1
|
1558
|
#! /usr/bin/env python3
import numpy as np
import WrightTools as wt
from WrightTools import datasets
def test_perovskite():
p = datasets.wt5.v1p0p0_perovskite_TA # axes w1=wm, w2, d2
# A race condition exists where multiple tests access the same file in short order
# this loop will open the file when it becomes available.
while True:
try:
data = wt.open(p)
break
except:
pass
return wt.artists.quick2D(data, xaxis=0, yaxis=2, at={"w2": [1.7, "eV"]})
def test_4D():
w1 = np.linspace(-3, 3, 3)
w2 = np.linspace(-2, 2, 3)
w3 = np.linspace(-1, 1, 3)
tau = np.linspace(-1, 3, 2)
signal = (
w1[:, None, None, None]
+ w2[None, :, None, None]
+ w3[None, None, :, None]
+ tau[None, None, None, :]
)
data = wt.data.Data(name="data")
data.create_channel("signal", values=signal, signed=True)
data.create_variable("w1", values=w1[:, None, None, None], units="wn", label="1")
data.create_variable("w2", values=w2[None, :, None, None], units="wn", label="2")
data.create_variable("w3", values=w3[None, None, :, None], units="wn", label="3")
data.create_variable("d1", values=tau[None, None, None, :], units="ps")
data.transform("w1", "w2", "w3", "d1")
return wt.artists.quick2D(data, xaxis=0, yaxis=1)
if __name__ == "__main__":
import matplotlib.pyplot as plt
plt.close("all")
# store to variable to prevent garbage collection
t0 = test_perovskite()
t1 = test_4D()
plt.show()
|
mit
|
grundgruen/zipline
|
zipline/utils/cli.py
|
2
|
8286
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except:
PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
DEFAULTS = {
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL',
'metadata_index': 'symbol',
'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
* ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', '-d', choices=('yahoo',))
parser.add_argument('--source_time_column', '-t')
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
parser.add_argument('--metadata_path', '-m')
parser.add_argument('--metadata_index', '-x')
parser.add_argument('--print-algo', '-p', dest='print_algo',
action='store_true')
parser.add_argument('--no-print-algo', '-q', dest='print_algo',
action='store_false')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
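# Usage sketch (hypothetical helper): options not given on the command line
# fall back to the DEFAULTS dict merged in via set_defaults above.
def _example_parse_args():
    parsed = parse_args(['--symbols', 'AAPL,MSFT', '--capital_base', '1e5'])
    assert parsed['symbols'] == 'AAPL,MSFT'
    assert parsed['data_frequency'] == 'daily'   # taken from DEFAULTS
    return parsed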
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
# Remove print_algo kwarg to overwrite below.
args.pop('print_algo')
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
1. Load data (start and end dates can be provided as strings, as
well as the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = kwargs['start']
end = kwargs['end']
# Compare against None because strings/timestamps may have been given
if start is not None:
start = pd.Timestamp(start, tz='UTC')
if end is not None:
end = pd.Timestamp(end, tz='UTC')
# Fail out if only one bound is provided
if ((start is None) or (end is None)) and (start != end):
raise PipelineDateError(start=start, end=end)
# Check if start and end are provided, and if the sim_params need to read
# a start and end from the DataSource
if start is None:
overwrite_sim_params = True
else:
overwrite_sim_params = False
symbols = kwargs['symbols'].split(',')
asset_identifier = kwargs['metadata_index']
# Pull asset metadata
asset_metadata = kwargs.get('asset_metadata', None)
asset_metadata_path = kwargs['metadata_path']
# Read in a CSV file, if applicable
if asset_metadata_path is not None:
if os.path.isfile(asset_metadata_path):
asset_metadata = pd.read_csv(asset_metadata_path,
index_col=asset_identifier)
source_arg = kwargs['source']
source_time_column = kwargs['source_time_column']
if source_arg is None:
raise NoSourceError()
elif source_arg == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
elif os.path.isfile(source_arg):
source = zipline.data.load_prices_from_csv(
filepath=source_arg,
identifier_col=source_time_column
)
elif os.path.isdir(source_arg):
source = zipline.data.load_prices_from_csv_folder(
folderpath=source_arg,
identifier_col=source_time_column
)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'),
equities_metadata=asset_metadata,
start=start,
end=end)
perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
|
apache-2.0
|
wathen/PhD
|
MHD/FEniCS/MHD/CG/common/IterOperations.py
|
1
|
8404
|
from dolfin import *
import PETScIO as IO
import numpy as np
import scipy.linalg as splin
import scipy
import petsc4py
import sys
import time
petsc4py.init(sys.argv)
# import matplotlib.pylab as plt
from petsc4py import PETSc
import MatrixOperations as MO
def StoreMatrix(A,name):
test ="".join([name,".mat"])
scipy.io.savemat( test, {name: A},oned_as='row')
def Errors(X,mesh,FSpaces,ExactSolution,k,dim, FS = "CG"):
Vdim = dim[0]
Pdim = dim[1]
Mdim = dim[2]
Rdim = dim[3]
# k +=2
VelocityE = VectorFunctionSpace(mesh,"CG",3)
u = interpolate(ExactSolution[0],VelocityE)
PressureE = FunctionSpace(mesh,FS,2)
# parameters["form_compiler"]["quadrature_degree"] = 8
# X = x.array()
xu = X[0:Vdim]
ua = Function(FSpaces[0])
ua.vector()[:] = xu
pp = X[Vdim:Vdim+Pdim]
pa = Function(FSpaces[1])
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(FSpaces[1])
ones.vector()[:]=(0*pp+1)
pp = Function(FSpaces[1])
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(ExactSolution[1],PressureE)
pe = Function(PressureE)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
ErrorU = Function(FSpaces[0])
ErrorP = Function(FSpaces[1])
ErrorU = u-ua
ErrorP = pe-pp
tic()
errL2u= sqrt(abs(assemble(inner(ErrorU, ErrorU)*dx)))
MO.StrTimePrint("Velocity L2 error, time: ", toc())
tic()
errH1u= sqrt(abs(assemble(inner(grad(ErrorU), grad(ErrorU))*dx)))
MO.StrTimePrint("Velocity H1 error, time: ", toc())
tic()
errL2p= sqrt(abs(assemble(inner(ErrorP, ErrorP)*dx)))
MO.StrTimePrint("Pressure L2 error, time: ", toc())
parameters["form_compiler"]["quadrature_degree"] = 5
MagneticE = FunctionSpace(mesh,"N1curl",3)
LagrangeE = FunctionSpace(mesh,"CG",3)
b = interpolate(ExactSolution[2],MagneticE)
r = interpolate(ExactSolution[3],LagrangeE)
xb = X[Vdim+Pdim:Vdim+Pdim+Mdim]
ba = Function(FSpaces[2])
ba.vector()[:] = xb
xr = X[Vdim+Pdim+Mdim:]
ra = Function(FSpaces[3])
ra.vector()[:] = xr
ErrorB = Function(FSpaces[2])
ErrorR = Function(FSpaces[3])
# plot(ua)
# plot(pp)
# plot(ba)
# plot(ra)
# plot(u)
# plot(pe)
# plot(b)
# plot(r)
# print curl(b).vector().array()
# ssss
# print b.vector().array()-ba.vector().array()
# ssss
ErrorB = b-ba
ErrorR = r-ra
# print ' Exact solution curl ', assemble(curl(b)*dx), ' assemble(curl(b)*dx)'
# print ' Approx solution curl ', assemble(curl(ba)*dx), ' assemble(curl(ba)*dx)'
# print ' Error curl ', assemble(curl(ErrorB)*dx), ' assemble(curl(ErrorB)*dx)'
# # print ' Error ', assemble((ErrorB)*dx), ' assemble((ErrorB)*dx)'
# print ' Error curl-curl ', assemble(curl(ErrorB)*curl(ErrorB)*dx), ' assemble(curl(ErrorB)*curl(ErrorB)*dx)'
# print ' Error inner curl-curl ', assemble(inner(curl(ErrorB),curl(ErrorB))*dx), ' assemble(inner(curl(ErrorB),curl(ErrorB))*dx)'
tic()
errL2b= sqrt(abs(assemble(inner(ErrorB, ErrorB)*dx)))
MO.StrTimePrint("Magnetic L2 error, time: ", toc())
tic()
errCurlb = sqrt(abs(assemble(inner(curl(ErrorB),curl(ErrorB))*dx)))
MO.StrTimePrint("Magnetic Curl error, time: ", toc())
tic()
errL2r= sqrt(abs(assemble(inner(ErrorR, ErrorR)*dx)))
MO.StrTimePrint("Multiplier L2 error, time: ", toc())
tic()
errH1r= sqrt(abs(assemble(inner(grad(ErrorR), grad(ErrorR))*dx)))
MO.StrTimePrint("Multiplier H1 error, time: ", toc())
# errL2b= errornorm(b, ba, norm_type='L2', degree_rise=4)
# errCurlb = errornorm(b, ba, norm_type='Hcurl0', degree_rise=4)
# errL2r= errornorm(r, ra, norm_type='L2', degree_rise=4)
# errH1r= errornorm(r, ra, norm_type='H10', degree_rise=4)
return errL2u, errH1u, errL2p, errL2b, errCurlb, errL2r, errH1r
def PicardTolerance(x,u_k,b_k,FSpaces,dim,NormType,iter):
X = IO.vecToArray(x)
uu = X[0:dim[0]]
bb = X[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]
u = Function(FSpaces[0])
u.vector()[:] = u.vector()[:] + uu
diffu = u.vector().array() - u_k.vector().array()
b = Function(FSpaces[2])
b.vector()[:] = b.vector()[:] + bb
diffb = b.vector().array() - b_k.vector().array()
if (NormType == '2'):
epsu = splin.norm(diffu)/sqrt(dim[0])
epsb = splin.norm(diffb)/sqrt(dim[0])
elif (NormType == 'inf'):
epsu = splin.norm(diffu, ord=np.Inf)
epsb = splin.norm(diffb, ord=np.Inf)
else:
print "NormType must be 2 or inf"
quit()
print 'iter=%d: u-norm=%g b-norm=%g ' % (iter, epsu,epsb)
u_k.assign(u)
b_k.assign(b)
return u_k,b_k,epsu,epsb
def PicardToleranceDecouple(x,U,FSpaces,dim,NormType,iter,SaddlePoint = "No"):
X = IO.vecToArray(x)
uu = X[0:dim[0]]
if SaddlePoint == "Yes":
bb = X[dim[0]:dim[0]+dim[1]]
pp = X[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]
else:
pp = X[dim[0]:dim[0]+dim[1]]
bb = X[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]
rr = X[dim[0]+dim[1]+dim[2]:]
u = Function(FSpaces[0])
u.vector()[:] = uu
u_ = assemble(inner(u,u)*dx)
diffu = u.vector().array()
# if SaddlePoint == "Yes":
# p = Function(FSpaces[2])
# p.vector()[:] = pp
# ones = Function(FSpaces[2])
# ones.vector()[:]=(0*ones.vector().array()+1)
# pp = Function(FSpaces[2])
# print ones
# pp.vector()[:] = p.vector().array()- assemble(p*dx)/assemble(ones*dx)
# p = pp.vector().array()
# b = Function(FSpaces[1])
# b.vector()[:] = bb
# diffb = b.vector().array()
# else:
print pp.shape
p = Function(FSpaces[1])
print FSpaces[1].dim()
p.vector()[:] = pp
p_ = assemble(p*p*dx)
ones = Function(FSpaces[1])
ones.vector()[:]=(0*ones.vector().array()+1)
pp = Function(FSpaces[1])
pp.vector()[:] = p.vector().array() - assemble(p*dx)/assemble(ones*dx)
p_ = assemble(pp*pp*dx)
p = pp.vector().array()
b = Function(FSpaces[2])
b.vector()[:] = bb
b_ = assemble(inner(b,b)*dx)
diffb = b.vector().array()
r = Function(FSpaces[3])
r.vector()[:] = rr
r_ = assemble(r*r*dx)
# print diffu
if (NormType == '2'):
epsu = splin.norm(diffu)/sqrt(dim[0])
epsp = splin.norm(pp.vector().array())/sqrt(dim[1])
epsb = splin.norm(diffb)/sqrt(dim[2])
epsr = splin.norm(r.vector().array())/sqrt(dim[3])
elif (NormType == 'inf'):
epsu = splin.norm(diffu, ord=np.Inf)
epsp = splin.norm(pp.vector().array(),ord=np.inf)
epsb = splin.norm(diffb, ord=np.Inf)
epsr = splin.norm(r.vector().array(),ord=np.inf)
else:
print "NormType must be 2 or inf"
quit()
# U.axpy(1,x)
p = Function(FSpaces[1])
RHS = IO.vecToArray(U+x)
if SaddlePoint == "Yes":
u.vector()[:] = RHS[0:dim[0]]
p.vector()[:] = pp.vector().array()+U.array[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]
b.vector()[:] = RHS[dim[0]:dim[0]+dim[1]]
r.vector()[:] = RHS[dim[0]+dim[1]+dim[2]:]
else:
u.vector()[:] = RHS[0:dim[0]]
p.vector()[:] = pp.vector().array()+U.array[dim[0]:dim[0]+dim[1]]
b.vector()[:] = RHS[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]
r.vector()[:] = RHS[dim[0]+dim[1]+dim[2]:]
# print diffu.dot(diffu) + pp.dot(pp) + diffb.dot(diffb) + r.dot(r)
# epsu = sqrt(u_)/sqrt(dim[0])
# epsp = sqrt(p_)/sqrt(dim[1])
# epsb = sqrt(b_)/sqrt(dim[2])
# epsr = sqrt(r_)/sqrt(dim[3])
# uOld = np.concatenate((diffu, pp.vector().array(), diffb, r.vector().array()), axis=0)
# print np.linalg.norm(uOld)/sum(dim)
print 'u-norm=%g p-norm=%g \n b-norm=%g r-norm=%g' % (epsu,epsp,epsb,epsr), '\n\n\n'
print 'u-norm=%g p-norm=%g \n b-norm=%g r-norm=%g' % (sqrt(u_), sqrt(p_), sqrt(b_), sqrt(r_)), '\n\n\n'
return u,p,b,r,epsu+epsp+epsb+epsr
def u_prev(u,p,b,r):
uOld = np.concatenate((u.vector().array(),p.vector().array(),b.vector().array(),r.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
return x
|
mit
|
lenovor/scikit-learn
|
examples/svm/plot_svm_scale_c.py
|
223
|
5375
|
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
wohllab/milkyway_proteomics
|
galaxy_milkyway_files/tools/wohl-proteomics/ptmRSmax/ptmrsmax_wrapper.py
|
1
|
48840
|
import os, sys, re
import optparse
import shutil
import pandas
import numpy
import subprocess
import math
import pyopenms
import gc
import operator
import uniprot as uni
from pyteomics import mass
from pyopenms import *
from tqdm import *
from joblib import Parallel, delayed
import multiprocessing
from multiprocessing import cpu_count
import modification
#from pyopenms.sysinfo import free_mem
import time
#####################################
#This is the wrapper for ptmRSMax... Adapted from the Luciphor2 wrapper, and the phosphoRS wrapper (RSmax).
#It will be responsible for taking command line arguments and building a
#input file for running of ptmRSMax to help localize PTMs on
#identified modified peptides, and ultimately producing an estimate of
#confidence of PTM localization for the peptides
#
#VERSION 0.4A
version="0.4A"
#DATE: 04/07/2017
date="04/07/2017"
#ptmRSmax XML configuration file should be named inputFileBase+"_ptmRS_config.xml"
#####################################
print "-----------------------------------------------------------------------"
print "Welcome to the ptmRSmax wrapper for Galaxy, Wohlschlegel Lab UCLA"
print "Written by William Barshop"
print "Version: ",version
print "Date: ",date
basedir=os.getcwd()
##############
#file = MzMLFile()
#experiment = MSExperiment()
#file.load(inputfile, experiment)
#That's temporary storage for pyopenms stuff...
##for spectrum in experiment:
## if spectrum.getMSLevel() == 2:
## precursor = spectrum.getPrecursors()
## precursorMZ = precursor[0].getMZ()
## precursorCharge = precursor[0].getCharge()
## precursorUnchargedMass = precursor[0].getUnchargedMass()
## #print precursorUnchargedMass
## #pause = raw_input("Pause")
## #precursorMZ=precursor[0][0]
## spec_array = spectrum.get_peaks()
## instrumentSettings = spectrum.getInstrumentSettings()
## acquisitionInfo = spectrum.getAcquisitionInfo()
## nativeID = spectrum.getNativeID()
def applyParallel(dfGrouped, func):
#print dfGrouped,type(dfGrouped)
#retLst = [x for x in [group.apply(fix_modification_parallel) for name, group in dfGrouped]]
retLst = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(func)(group) for name, group in dfGrouped)
#retLst = Parallel(n_jobs=8)(delayed(func)(group) for name, group in dfGrouped)
#print retLst[0].columns,"before concat..."
my_tmp=pandas.concat(retLst)
return_df = my_tmp.reindex_axis(retLst[0].columns, axis=1)
#print return_df.columns,"after concat..."
return return_df
def fix_modification_parallel(group):
#print group.columns,"in parallel at start..."
#global numMod_list
#global siteProbabilities_list
filtered_modification_list=[]
all_target_aa=[]
mod_specific_aa={}
first_time_setup=True
for eachmod in modification_list:
all_target_aa.extend(eachmod.getAAs())
mod_specific_aa[eachmod.getName()]=eachmod.getAAs()
for index,eachrow in group.iterrows():
if ((eachrow['ptmRS_numMods'] < 1) and (']' in eachrow['sequence'])):
#pass
#print eachrow
zz=0
newPep=[]
while zz<len(eachrow['sequence']):
if not eachrow['sequence'][zz]=='[':
newPep.append(eachrow['sequence'][zz])
else:
buffer=newPep.pop()
while zz<len(eachrow['sequence']) and eachrow['sequence'][zz]!=']':
buffer+=eachrow['sequence'][zz]
zz+=1
buffer+=eachrow['sequence'][zz]
buffer=buffer[:2]+buffer[2:-1]+"]" #We'll cram in the full mass accuracy mod as given in the settings...
newPep.append(buffer)
zz+=1
#print group.loc[index,'pRS_sequence'],"BEFORE - 1"
group.loc[index,'ptmRS_sequence']=''.join(newPep)
#print group.loc[index,'pRS_sequence'],"AFTER - 1"
elif eachrow['ptmRS_numMods'] >= 1:
#print group.loc[index,'pRS_sequence'],"BEFORE - 2"
mod_specific_PPS={}
total_numPPS=0
#print "----------------------------------"
for each_aa in all_target_aa:
#print "looking for ",each_aa
total_numPPS+=eachrow['sequence'].count(each_aa)
#print "found ",str(eachrow['sequence'].count(each_aa))
for each_mod in mod_specific_aa:
for each_aa in mod_specific_aa[each_mod]:
if not each_mod in mod_specific_PPS:
mod_specific_PPS[each_mod]=0
mod_specific_PPS[each_mod]+=eachrow['sequence'].count(each_aa)
zz=0
newPep=[]
while zz<len(eachrow['sequence']):
if not eachrow['sequence'][zz]=='[':
newPep.append(eachrow['sequence'][zz])
else:
buffer=newPep.pop()
while zz<len(eachrow['sequence']) and eachrow['sequence'][zz]!=']':
buffer+=eachrow['sequence'][zz]
zz+=1
buffer+=eachrow['sequence'][zz]
#print buffer,buffer[3:-1]
if buffer[3:-1] in modMass_targets: # if this mass is in our list of target mods, then we'll go ahead and drop it and only add back the AA.
#print "IT IS IN MOD MASS TO TARGETS..",buffer[3:-1]
buffer=buffer[:1]
else:
#print "IT IS NOT IN MOD MASS TO TARGETS..",buffer[3:-1],type(buffer[3:-1])
#for each in modMass_targets:
# print each,type(each),"in modmass targets..."
buffer=buffer[:-1]+"]" #We'll cram in the full mass accuracy mod as given in the settings...
newPep.append(buffer)
zz+=1
#print newPep,"should have tmods removed..."
#We have stripped out the target modifications.
#now we will read the ptmRS modification string (FOR EACH TARGET MOD) and determine which positions to place the current target modification
#print eachrow,eachrow.index
#print eachrow.index.tolist()
if first_time_setup:
#mod_set=[]
index_localprob=eachrow.index.tolist().index('ptmRS_peptideLocalizationProbability')
#print "index is ...",index_localprob
remaining_list=eachrow.index.tolist()[index_localprob+1:-3]
mod_set=set([x.split("_")[0] for x in remaining_list])
#for each in remaining_list:
#print "MOD SET IS",mod_set
#print remaining_list
for eachmod in modification_list:
if eachmod.getName() in mod_set:
filtered_modification_list.append(eachmod)
#######modmap_per_run[run_dict[int(each_idx)]['Original File Name']+".mzML"][eachrow['scan']]=modmap # THIS IS NOW AVAILABLE...
#### Check ModificationManagement getScoredEquivalent() whatever it's called... maybe that's the key to
#### intelligently knowing which mods are grouped together and recounting nummods correctly...
first_time_setup=False
#print eachrow,"yo"
#print filtered_modification_list,"this is the filtered list..."
#for eachMod in modification_list:#this should be the target mods only...
for eachMod in filtered_modification_list:#this should be the target mods only...
#print "I'm in - 1"
this_mod_name=eachMod.getName()
this_mod_number_of_sites=int(eachrow[this_mod_name+"_numMods"])
#if isinstance(eachrow[this_mod_name+"_siteProbabilities"],float):
# print ">>>>>>>>>>>>>>>>>>>>>"
# print eachrow
# print "<<<<<<<<<<<<<<<<<<<<<~~~~~~~~~~~~~~~~"
this_mod_site_dict={}
#print this_mod_number_of_sites,"this is the number of sites...."
#print this_mod_name,"this is the name...."
if this_mod_number_of_sites > 0:
this_mod_site_scores_split=eachrow[this_mod_name+"_siteProbabilities"].split(';')
#print this_mod_site_scores_split,"split scores..."
for eachsite in this_mod_site_scores_split:
#print eachsite,"eachsite"
score=float(eachsite.split()[1])
factor=int(eachsite.split()[0].rsplit('x',1)[1].replace(":",""))
site_index=str(int(eachsite.split()[0].split('(')[1].split(')')[0])-1)+"_"+str(factor)
this_mod_site_dict[site_index]=score
this_mod_sorted_sites = sorted(this_mod_site_dict.items(), key=operator.itemgetter(1), reverse=True)
this_mod_sorted_sites=this_mod_sorted_sites[:this_mod_number_of_sites]
#print this_mod_sorted_sites,"sorted sites, filtered... before"
this_mod_sorted_sites=[(x[0].split("_")[0],x[1]) for x in this_mod_sorted_sites]
#print this_mod_sorted_sites,"sorted sites, filtered... after"
collapse_dict={}
for eachmod in this_mod_sorted_sites:
#print eachmod,"mod sorted sites eachmod"
if eachmod[0] in collapse_dict:
collapse_dict[eachmod[0]]+=float(eachMod.getMass())
else:
collapse_dict[eachmod[0]]=float(eachMod.getMass())
#print collapse_dict,"collapse dict"
#I NEED TO COLLAPSE "POLYMERIC MODS" DOWN IF THEY ARE HIGH ENOUGH IN THE RANKING TO BE PLACED ON AMINOACIDS.
for each_mod in collapse_dict:
#newPep[int(this_mod_sorted_sites[ns][0])]+="[+"+str(eachMod.getMass()*this_mod_factor_dict[int(this_mod_sorted_sites[ns][0])])+"]"
if collapse_dict[each_mod]>=0:
newPep[int(each_mod)]+="[+"+str(collapse_dict[each_mod])+"]"
else:
newPep[int(each_mod)]+="["+str(collapse_dict[each_mod])+"]"
#print "moving right along..",total_numPPS
#print sorted_sites[ns],"this will be the ",ns,"th site to add a target mod!"
#ns+=this_mod_factor_dict[int(this_mod_sorted_sites[ns][0])]
#ns+=1 #CHANGE THIS TO BE THE FACTOR
#print "replacing "+eachrow['sequence']+" with "+''.join(newPep)
group.loc[index,'ptmRS_totalNumPPS']=total_numPPS
#print "placed totalnumpps",total_numPPS
group.loc[index,'ptmRS_sequence']=''.join(newPep)
#print "did the sequence, too"
#print group.loc[index,'ptmRS_sequence'],"AFTER - 2"
#print group.columns,"in parallel at end..."
return group
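# Usage sketch (hypothetical, assumes a 'file' column in the PSM dataframe):
# applyParallel is used like groupby(...).apply(func), with one joblib worker
# per group, e.g. to run fix_modification_parallel over each run's PSMs.
def _example_apply_parallel(psm_df):
    return applyParallel(psm_df.groupby('file'), fix_modification_parallel)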
####################################
#Argument parsing! So much fun!
#We'll use OptParse even though some
#people really rave about argparse...
#
#
# NB: With Optparse, if an option is
# not specified, it will take a
# value of None
####################################
#print sys.argv,"THESE ARE THE ARGS"
parser = optparse.OptionParser()
parser.add_option("--pout",action="store",type="string",dest="operation_folder")
parser.add_option("--ms2tol",action="store", type="float", dest="ms2tol")
parser.add_option("--scored",action="store",type="string",dest="scored")
parser.add_option("--unscored",action="store",type="string",dest="unscored")
#parser.add_option("--target",action="store",type="string",dest="target_mods")
#parser.add_option("--targetnames",action="store",type="string",dest="target_mod_names")
#parser.add_option("--variable",action="store",type="string",dest="variable_mods")
parser.add_option("--silac",action="store",type="string",dest="silac_mods")
#parser.add_option("--di",action="store",type="string",dest="diagnostic_ions")
#parser.add_option("--nl",action="store",type="string",dest="neutral_loss")
parser.add_option("--nt",action="store",type="int",dest="num_threads")
parser.add_option("--mzml",action="store",type="string",dest="mzml_files")
parser.add_option("--expgroups",action="store",type="string",dest="exp_group_file")
parser.add_option("--mp",action="store",type="int",dest="max_permutations")
parser.add_option("--scorenl",action="store_true",dest="score_nl")
parser.add_option("--filter",action="store", type="float", dest="filter")
parser.add_option("--xml",action="store",type="string",dest="xml_file")
#"--scorenl"
(options,args) = parser.parse_args()
aa_masses = {'A' : 71.037114, 'R' : 156.101111, 'N' : 114.042927, 'D' : 115.026943, 'C' : 103.009185, 'E' : 129.042593, 'Q' : 128.058578, 'G' : 57.021464, 'H' : 137.058912, 'I' : 113.084064, 'L' : 113.084064, 'K' : 128.094963, 'M' : 131.040485, 'F' : 147.068414, 'P' : 97.052764, 'S' : 87.032028, 'T' : 101.047679, 'U' : 150.95363, 'W' : 186.079313, 'Y' : 163.06332, 'V' : 99.068414 }
######### I scraped these AA masses from Matrix, who proudly bring us Mascot!
global modification_list#To copy into Parallel instances by joblib...
#global modification_list_scored#To copy into Parallel instances by joblib...
modification_list=[]
#modification_list_scored=[]
#########HERE I NEED TO
#########CRAWL THROUGH THE POUT FOLDER AND
#########GET A LIST OF ALL THE SPECTRAL FILES AND SUUUCH
#OKKKKKAYYYYY Let's get a list of all the MZML files we got!
mzml_dict={}
for eachfile in options.mzml_files.split(","):
if eachfile in mzml_dict:
print "Somehow you gave me the same file twice.... Just a warning, but you might want to take a look at what's going on!"
else:
mzml_dict[eachfile]={}
#### Okay, now we'll need to read in the percolator experimental groups so that we
#### can build the input files for Luciphor2 appropriately! (that means get the groups right...)
if options.exp_group_file is None:
print "I need to know how to group the experiments to properly build my ptmRS input files... Gimmie the file!"
sys.exit(2)
filter = 1.0 #This allows everything as the default!
if options.filter is not None:
filter=options.filter
# I'm lazy, so let's just read the file into a dataframe.
#print options.exp_group_file,type(options.exp_group_file)
#with open(options.exp_group_file,'r') as expfile:
group_information = pandas.read_csv(options.exp_group_file,sep='\t')
#print group_information,"this was group info...."
#print type(group_information)
run_dict={}
#And then for easy access, we'll make a dict of it.
for index,row in group_information.iterrows():
run_dict[row['Crux File Integer']]=row
#for each in run_dict.keys():
# print type(each)
infiles = []
for root, subFolders, files in os.walk(options.operation_folder):
for eachfile in files:
if 'target.psms.txt' in eachfile:
infiles.append(str(os.path.join(root,eachfile)))
dataframe_vector=[]
for eachfile in infiles:
newdf=pandas.read_csv(eachfile,sep='\t',index_col=False)
dataframe_vector.append(newdf)
del newdf
combined_results=pandas.concat(dataframe_vector)
#del dataframe_vector
#######Okay, so now we have all our percolator PSMs loaded into a
#######single pandas dataframe. We'll take that concatenated dataframe
#######and then iterate over all the results and place in the file names
#######while constructing the input files.
theruns=[]
#for each_run in group_information['Original File Name']:
# thisrun=each_run+".mzML"
# theruns.append(thisrun)
combined_results['file_idx']=combined_results['file_idx'].astype(str)
combined_results['file']=combined_results['file'].astype(str)
combined_results['percolator q-value']=combined_results['percolator q-value'].astype(float)
#for each_run in group_information['Original File Name']:
# thisrun=str(each_run)+".mzML"
new_results=[]
for each_idx in group_information['Crux File Integer']:
#print type(each_idx),type(combined_results['file_idx'])
mask = combined_results[(combined_results.file_idx == str(each_idx))]
mask['file']=run_dict[int(each_idx)]['Original File Name']+".mzML"
new_results.append(mask)
#print run_dict[int(each_idx)]['Original File Name']+".mzML"
combined_results=pandas.concat(new_results)
output_results=pandas.concat(new_results)
####### Okay, now we've taken care of file names, let's just go ahead and build us some luciphor inputs!
luci_input_writers={}
luci_input_files=[] #This is just a list of the files we'll have to run luciphor on!
ptmrs_config_writers={}
if options.xml_file:
xml_locations=[]
for eachgroup in set(group_information['Fractionation Group ID String']):
mywriter=open(options.operation_folder+eachgroup+".pin_out/crux-output/"+eachgroup+"_rsmax.txt",'w')
if not options.xml_file:
ptmrswriter=open(options.operation_folder+eachgroup+".pin_out/crux-output/"+eachgroup+"_ptmRS_config.xml",'w')
ptmrs_config_writers[eachgroup]=ptmrswriter
else:
xml_locations.append(options.operation_folder+eachgroup+".pin_out/crux-output/"+eachgroup+"_ptmRS_config.xml")
luci_input_files.append(options.operation_folder+eachgroup+".pin_out/crux-output/"+eachgroup+"_rsmax.txt")
#mywriter.write("specId\tpeptide\tmodPosition\tmodMap\tactivationType\tcharge\tpeak list\tprecursormz\n")
luci_input_writers[eachgroup]=mywriter
run_to_group={}
for index,row in group_information.iterrows(): #['Original File Name']:
thisrun = row['Original File Name']+".mzML"
thisgroup = row['Fractionation Group ID String']
run_to_group[thisrun]=thisgroup
target_masses = []
non_decimal = re.compile(r'[^\d.]+')
for thismod in options.scored.split("___"):
for eachmod in thismod.split(",")[0]:
if eachmod is not "":
try:
mod_mass=float(eachmod)
except:
mod_mass=round(mass.calculate_mass(formula=eachmod),6)
target_masses.append(mod_mass)
#target_masses.append(non_decimal.sub('',eachmod))
target_masses=list(set(target_masses))
#modId,ScoredmodMassDouble,NLmassDouble,ScoredModificationName,ScoredShortModName,ScoredModSiteAAs,ScoredElementalModificationStr
#We'll take in the info about the target mod...
target_masses_list=[]
mods_by_mass={}
terminal_aa_re=re.compile(r'([\D]+$)')
for each_mod in options.scored.split("___"):
this_mod_split=each_mod.split(",")
if len(this_mod_split)<4:
continue #We'll just skip it... Wrong formatting.
newMod=modification.Modification()
if len(this_mod_split)>4 and len(this_mod_split[5])>0:
#print this_mod_split[5],"THIS IS THE NL STRING...<--------------"
NL_mass=-1.0*float(non_decimal.sub('',this_mod_split[5]))
NL_AAs=terminal_aa_re.search(this_mod_split[5]).groups(0)[0]
#print NL_AAs,"These are the NL AAs... and all of them"
#print "raw string...",this_mod_split[5]
NL_string=str(NL_AAs)+" "+str(NL_mass)
else:
NL_string="-"
try:
mod_mass=float(this_mod_split[0])
except:
mod_mass=round(mass.calculate_mass(formula=this_mod_split[0]),6)
pass #Handle conversion of elemental composition to mass... pyteomics?
print this_mod_split
if str(mod_mass) not in mods_by_mass:
newMod.create(this_mod_split[4],this_mod_split[1]+" "+str(mod_mass),NL_string)#nameString,modString,NLstring
mods_by_mass[str(mod_mass)]=newMod
target_masses_list.append(str(newMod.getMass()))
modification_list.append(newMod)
else:
update_me=mods_by_mass[str(mod_mass)]
        existing_mass=update_me.getMass() #renamed so the pyteomics "mass" module used above is not shadowed
aas=update_me.getAAs()
NLs=update_me.getNL()
#print NLs,"these are NLs..."
for each_aa in this_mod_split[1]:
if each_aa not in aas:
aas.append(each_aa)
update_me.setAAs(aas)
if len(this_mod_split)>4 and len(this_mod_split[5])>0 and this_mod_split[5]!="-":
NL_mass=-1.0*float(non_decimal.sub('',this_mod_split[5]))
NL_AAs=terminal_aa_re.search(this_mod_split[5]).groups(0)[0]
for each_AA in NL_AAs:
if each_AA not in NLs:
NLs[each_AA]=NL_mass
update_me.setNL(NLs)
#print aas, mass,NLs,"after merge..."
#pass#Update this with more AAs, etc
#target_mod_names_split = [x for x in options.target_mod_names.split(",")] # Name Strings
#target_mod_split = [x for x in options.target_mods.split(",")] # Taret Mod String
#target_mod_NL_split = [x for x in options.neutral_loss.split(",")] # NL Strings
##target_mod_DI_split = [x for x in options.diagnostic_ions.split(",")] # DIstring "diagnostic_ions"
##zipped_target_mods = zip(target_mod_names_split,target_mod_split,target_mod_NL_split)#,target_mod_DI_split)
print "We'll score ",str(len(modification_list)),"mods (ptmRS)"
for each_mod in modification_list:
print each_mod.mass
print each_mod.AAs
print "------------"
#target_masses_list=[]
#for each_mod in zipped_target_mods:
# newMod=modification.Modification()
# newMod.create(each_mod[0],each_mod[1],each_mod[2])#,each_mod[3])
# target_masses_list.append(str(newMod.getMass()))
# modification_list.append(newMod)
# #modification_list_scored.append(newMod)
##print modification_list
##sys.exit(2)
#target_mod_split = [x.split() for x in options.target_mods.split(",")] ###############################################<-~~~~~~~~~~~~~~~~~~~~~~~
#target_mod_AA = target_mod_split[0]
#target_mod_mass = target_mod_split[1]
#target_mod_elements = target_mod_split[2]
#print options.neutral_loss,type(options.neutral_loss),"yo"
#if not options.neutral_loss is "":
# target_mod_NL_split = [x.split() for x in options.neutral_loss.split(",")]
#else:
# target_mod_NL_split = ["A 0.00 H".split()] #We're not using a NL...
#target_mod_NL_AA = target_mod_NL_split[0]
#target_mod_NL_mass = str(math.fabs(float(target_mod_NL_split[1])))
#target_mod_NL_elements = target_mod_NL_split[2]
########### THIS IS WHERE YOU LEFT OFF ON 4/7/2017 --- CONTINUE ON WITH ADAPTING THE UNSCORED MODS TO THE NEW SYSTEM! YOU'RE ALMOST THERE.
unscored_mod_dict={}#key is the mod mass, value is the modifiable AAs (as str)
if options.unscored!="None":
unscored_mod_split = options.unscored.split("___")
for each_mod in unscored_mod_split:
this_mod_split=each_mod.split(",")
print this_mod_split
if len(this_mod_split)<4:
continue
try:
mod_mass=float(this_mod_split[0])
except:
mod_mass=round(mass.calculate_mass(formula=this_mod_split[0]),6)
mod_aas=this_mod_split[1]
unscored_mod_dict[mod_mass]=mod_aas
#modId,ScoredmodMassDouble,NLmassDouble,ScoredModificationName,ScoredShortModName,ScoredModSiteAAs,ScoredElementalModificationStr
#We're going to make a reference dictionary to convert the mass (as str) to the modid... which we'll go ahead and assign now!
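#A hedged example of the mapping built below (editor's note, values hypothetical): with one
#scored mod of mass 79.96633 and one unscored mod of mass 15.994915 we would end up with
#   massStr_to_modID == {'79.96633': '1', '15.994915': '2'}
#so that bracketed masses in peptide strings can be translated into ptmRS mod IDs.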
id_itr=1
modID_targets=[] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ptmRS this will hold a list of the score target mods...
global modMass_targets
modMass_targets=[]
massStr_to_modID={}
for each_mod in modification_list:
massStr_to_modID[str(each_mod.getMass())]=str(id_itr)
modID_targets.append(str(id_itr))
modMass_targets.append(str(each_mod.getMass()))
each_mod.setID(id_itr)
id_itr+=1
#print modMass_targets
#print massStr_to_modID
#for each_mod in modification_list:
# print each_mod,each_mod.getID()
#sys.exit(2)
target_mods_less_than_this=id_itr
#Now, we'll go ahead and iterate over all the other mods...
#id_itr=2
for each_mod in unscored_mod_dict:
massStr_to_modID[str(each_mod)]=str(id_itr)
id_itr+=1
unscored_mod_str=""
for each_mod in unscored_mod_dict:
unscored_mod_str+=massStr_to_modID[str(each_mod)]+","+str(each_mod)+","+"0.0"+","+"unscored"+massStr_to_modID[str(each_mod)]+","+"mod"+massStr_to_modID[str(each_mod)]+","+unscored_mod_dict[each_mod]
for eachrun in run_to_group:
#print eachrun,"we're dealing with this run..."
##thislink=os.readlink(eachrun)
##shutil.copy(thislink,options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
shutil.copy(eachrun,options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
#global target_mass_conversion
#target_mass_conversion={}
#target_mass_conversion[str(round(float(target_mod_mass),2))]=str(target_mod_mass)
#We're also going to actually put the rest of our mod masses in there, too...
#for eachmod in options.variable_mods.split(","):
# mod_aa=eachmod.split()[0]
# mod_mass=eachmod.split()[1]
# target_mass_conversion[str(mod_mass)]=str(mod_mass)
#if options.variable_mods is not None:
# for each in unscored_mod_dict.keys():
# if each is not "":
# target_mass_conversion[str(non_decimal.sub('',each))]=str(non_decimal.sub('',each))
#rounded_target_masses=str(target_mod_mass)
#print "These are the rounded targets...",rounded_target_masses
global modmap_per_run
modmap_per_run={}
#print target_mass_conversion,"this is target mass conv."
regex = re.compile('[^a-zA-Z]')
for each_idx in group_information['Crux File Integer']:
file = MzMLFile()
experiment = MSExperiment()
print "loading file "
print run_dict[int(each_idx)]['Original File Name']+".mzML"#run_dict[int(each_idx)]
print ""
file.load(os.path.join(basedir,run_dict[int(each_idx)]['Original File Name']+".mzML"), experiment)
if not run_dict[int(each_idx)]['Original File Name']+".mzML" in modmap_per_run:
modmap_per_run[run_dict[int(each_idx)]['Original File Name']+".mzML"]={}
#That's temporary storage for pyopenms stuff...
##for spectrum in experiment:
## if spectrum.getMSLevel() == 2:
## precursor = spectrum.getPrecursors()
## precursorMZ = precursor[0].getMZ()
## precursorCharge = precursor[0].getCharge()
## precursorUnchargedMass = precursor[0].getUnchargedMass()
## #print precursorUnchargedMass
## #pause = raw_input("Pause")
## #precursorMZ=precursor[0][0]
## spec_array = spectrum.get_peaks()
## instrumentSettings = spectrum.getInstrumentSettings()
## acquisitionInfo = spectrum.getAcquisitionInfo()
## nativeID = spectrum.getNativeID()
mask = combined_results[(combined_results['file_idx'] == str(each_idx))]
#print '|'.join(target_masses),"This is target masses...."
mask2=mask[(mask['sequence'].str.contains('|'.join(target_masses_list)))] #numpy.any(mods in mask.sequence for mods in target_masses)]
mask3=mask2[(mask2['percolator q-value'] <= filter)]
#print mask3,"THIS WAS THE FILTERED FINAL MASK FOR THIS RUN..."
#sorted_mask2=mask2.sort_values('scan',ascending=True)
#mask=mask.mask(applyme)
#print mask,"THIS IS MASK"
#mask2 = mask[mask.sequence.contains('|'.join(target_masses))]
#print mask2,"this is mask2<------------------"
### OKAY, NOW YOU'LL NEED TO WRITE THE OUTPUT FOR THIS FILE
print "Generating input files...."
for index,eachrow in tqdm(mask3.iterrows()):
thispep=eachrow['sequence']
fullseqlen=len(thispep)
unmodpeplen=len(regex.sub('',thispep))
newpep=[]
adjustment=0
actualindex=0
nummodsadded=0
#print "---------------------------"
#print "Working on :",thispep
################ So we're going to construct the ModMap for this PSM.
#We'll iterate over the peptide sequence, reading the modifications and making a modmap of the PhosphoRS standard format.
# 0.000100200100.0
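        # A hedged worked example (editor's note, values hypothetical): for a PSM whose
        # sequence is "AAS[+79.96633]PEPK", where massStr_to_modID maps "79.96633" to "1",
        # the loop below yields modmap == "0.0010000.0" (one digit per residue, the "1"
        # marking the modified S) and mod_positions == "1:2" (modID:position), assuming
        # the bracketed mass string matches a key of massStr_to_modID exactly.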
modmap="0."
target_modsites=[]
i=0
while i < len(thispep):
if thispep[i].isalpha():
modmap+="0"
#newpep.append(thispep[i])
actualindex+=1
elif thispep[i]=="[":
#Now we know we're in a mod.
mod_start_pos=i
mod_end_pos=i#changed these from actualindex...
actualindex-=1 #Because we're seeing a mod for the AA right before this.
i-=1
while thispep[mod_end_pos]!="]":
mod_end_pos+=1
mod=thispep[mod_start_pos:mod_end_pos+1]
#print "THIS IS MOD",mod
diff=mod_end_pos+1-mod_start_pos
#i=i-diff#correction for the peptide losing stuff
#print thispep, "BEFORE"
thispep=thispep[:mod_start_pos]+thispep[mod_end_pos+1:]
#print thispep, "AFTER"
                modmap=modmap[:-1]#We'll remove the last 0 of modmap to place in the correct modification info...
#print massStr_to_modID,"this is the dictionary of interest..."
modmap+=massStr_to_modID[mod[2:-1]]
if massStr_to_modID[mod[2:-1]] in modID_targets:
target_modsites.append(str(massStr_to_modID[mod[2:-1]])+":"+str(len(modmap)-3))
#target_modsites.append(str(actualindex))
i+=1
actualindex+=1 # THIS WAS ADDED 12/15/2015 TO CORRECT FOR INDEX SHIFTING
modmap+=".0"
modmap_per_run[run_dict[int(each_idx)]['Original File Name']+".mzML"][eachrow['scan']]=modmap
#print modmap,"This is modmap!"
mod_positions=",".join(target_modsites)
specID=eachrow['file'].replace('.mzML','')+"."+str(eachrow['scan'])+'.'+str(eachrow['scan'])+'.'+str(eachrow['charge'])
#print specID,"this is specID"
spec_peaks=experiment[eachrow['scan']-1].get_peaks()#THIS WILL HAVE TO BE FORMATTED CORRECTLY....
formatted_spec_peaks=""
#for eachpeak in spec_peaks:
# formatted_spec_peaks+=str(round(eachpeak[0],6))+":"+str(round(eachpeak[1],6))+"," #6 decimal places of float accuracy...
for i in xrange(len(spec_peaks[0])):
formatted_spec_peaks+=str(round(spec_peaks[0][i],6))+":"+str(round(spec_peaks[1][i],6))+"," #6 decimal places of float accuracy...
formatted_spec_peaks=formatted_spec_peaks[:-1]
charge=eachrow['charge']
activation_method="HCD"#by default
activation_type=experiment[eachrow['scan']-1].getPrecursors()[0].getActivationMethods()
precursorMZ=experiment[eachrow['scan']-1].getPrecursors()[0].getMZ()
if 0 in activation_type:
activation_method="CID"
elif 5 in activation_type:
activation_method="ECD"
elif 8 in activation_type:
activation_method="HCD"
elif 11 in activation_type:
activation_method="ETD"
elif 13 in activation_type: ################################################################ WHAT IS THE ETHCD ACTIVATION TYPE INT?
activation_method="EThcD"
else:
print "I DON\'T KNOW WHAT THE ACTIVATION METHOD IS.... SO WE\'LL CALL IT HCD...."
#print "---------------------------"
        #FOR PhosphoRS, thispep = peptide sequence, cleaned
# modmap = modmap
# mod_positions = positions of modifications "14,18"
#mywriter.write("specId\tpeptide\tmodPosition\tmodMap\tactivationType\tcharge\tpeak list\tprecursormz\n")
luci_input_writers[run_to_group[eachrow['file']]].write(specID+'\t'+thispep+'\t'+mod_positions+'\t'+modmap+'\t'+activation_method+'\t'+str(eachrow['charge'])+'\t'+formatted_spec_peaks+'\t'+str(precursorMZ)+'\n')################### ACTIVATION TYPE, PEAKS, PRECURSORMZ
#luci_input_writers[run_to_group[eachrow['file']]].write(str(eachrow['file'])+'\t'+str(eachrow['scan'])+'\t'+str(eachrow['charge'])+'\t'+str(eachrow['percolator q-value'])+'\t'+regex.sub('',eachrow['sequence'])+'\t'+modstr+"\n")
del file
del experiment
gc.collect()
#mask = combined_results
os.chdir(basedir)
for eachwriter in luci_input_writers:
luci_input_writers[eachwriter].close()
if options.score_nl:
score_neutrals="true"
else:
score_neutrals="false"
processes=[]
#RSmax.exe /path/to/inputs/ scoreNLBoolean massToleranceDouble modId,ScoredmodMassDouble,NLmassDouble,ScoredModificationName,ScoredShortModName,ScoredModSiteAAs,ScoredElementalModificationStr
#modId,ScoredmodMassDouble,NLmassDouble,NL_aa,ScoredModificationName,ScoredShortModName,ScoredModSiteAAs
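#A hedged example of the pieces assembled below (editor's note, all values hypothetical): for
#a single scored phospho-like mod with ID 1, mass 79.96633, a -97.9769 neutral loss on S/T and
#target sites S/T/Y, target_modstr would look like
#   "1,79.96633,-97.9769,ST,Phospho,Phospho,STY "
#and unscored_mod_str chains entries of the form "2,15.994915,0.0,unscored2,mod2,M".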
target_modstr=""
for eachMod in modification_list:
NL_aas=""
NL_mass=""
AAstr=""
for eachNL in eachMod.getNL():
NL_aas+=eachNL
NL_mass=str(eachMod.getNL()[eachNL])
for eachAA in eachMod.getAAs():
AAstr+=eachAA
target_modstr+=eachMod.getID()+","+str(eachMod.getMass())+","+NL_mass+","+NL_aas+","+eachMod.getName()+","+eachMod.getName()+","+AAstr+" "
#print target_modstr
#sys.exit(2)
#target_modstr=massStr_to_modID[str(target_mod_mass)]+","+str(target_mod_mass)+","+target_mod_NL_mass+","+"targetMod"+","+"targ"+","+target_mod_AA+","+target_mod_elements
####COME BACK
if not options.xml_file:
for eachwriter in ptmrs_config_writers:
thiswriter=ptmrs_config_writers[eachwriter]
thiswriter.write("<AnyPTM>\n")
for eachMod in modification_list:
thiswriter.write("<modification name=\"{0}\" abbreviation=\"{1}\" searchdefined=\"TRUE\" mass=\"{2}\" unimodId=\"{3}\" >\n".format(eachMod.getName(),eachMod.getName(),str(eachMod.getMass()),eachMod.getID()))
for eachAA in eachMod.getAAs():
thiswriter.write("<target aminoacid=\"{0}\" />\n".format(eachAA))
#for eachNL in eachMod.getNL(): #float is value, key is AA
nl_dict=eachMod.getNL()
#print nl_dict
if isinstance(nl_dict,dict):
#print "i'm in!"
if len(nl_dict.keys())>0:
theAAs=nl_dict.keys()
for eachAA in theAAs:
nl_mass=nl_dict[eachAA]
if nl_mass > 0.0:
thiswriter.write("<neutralloss abbreviation=\"{0}\" mass=\"{1}\">\n".format(eachMod.getName()+"_nl_"+eachAA,nl_mass))
#for eachAA in theAAs:
thiswriter.write("<target aminoacid=\"{0}\" />\n".format(eachAA))
#thiswriter.write("<target aminoacid=\"{0}\" factor=\"1\"/>\n".format(eachAA))
#thiswriter.write("<target aminoacid=\"{0}\" factor=\"2\"/>\n".format(eachAA))
#thiswriter.write("<target aminoacid=\"{0}\" factor=\"3\"/>\n".format(eachAA))
thiswriter.write("</neutralloss>\n")
#for eachAA in nl_dict.keys():
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
thiswriter.write("<equivalentmodification name=\"{0}\" factor=\"1\" new=\"FALSE\"/>\n".format(eachMod.getName()))
thiswriter.write("</modification>\n")
thiswriter.write("<FragmentIonCompositionPreference>\n<FragmentIonComposition ActivationType=\"CID\" FragmentIonComposition=\"b,y\" NeutralLossFragmentIonComposition=\"\"/>\n<FragmentIonComposition ActivationType=\"HCD\" FragmentIonComposition=\"b,y\" NeutralLossFragmentIonComposition=\"b,y\"/>\n<FragmentIonComposition ActivationType=\"ETHcD\" FragmentIonComposition=\"b,y,c,zPrime,zRadical\" NeutralLossFragmentIonComposition=\"b,y\"/>\n<FragmentIonComposition ActivationType=\"ETD\" FragmentIonComposition=\"c,zPrime,zRadical\" NeutralLossFragmentIonComposition=\"\"/>\n</FragmentIonCompositionPreference>\n")
thiswriter.write("</AnyPTM>\n")
for eachwriter in ptmrs_config_writers:
ptmrs_config_writers[eachwriter].close()
else:
for each in xml_locations:
shutil.copy(options.xml_file,each)
with open(os.devnull, "w") as fnull:
for eachgroup in tqdm(set(group_information['Fractionation Group ID String'])):
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
#processes.append(subprocess.Popen(command,shell=True)
command = "mono /galaxy-central/tools/wohl-proteomics/ptmRSmax/ptmRSmax.exe "+os.getcwd()+" "+eachgroup+" "+str(options.num_threads)+" "+str(options.max_permutations)+" "+score_neutrals+" "+str(options.ms2tol)+" "+str(target_mods_less_than_this-1)+" "+target_modstr+" "+unscored_mod_str
#command = "mono /home/galaxy/wohl-proteomics-backups/2016-07-25/wohl-proteomics/ptmRSmax/ptmRSmax.exe "+os.getcwd()+" "+eachgroup+" "+str(options.num_threads)+" "+str(options.max_permutations)+" "+score_neutrals+" "+str(options.ms2tol)+" "+str(target_mods_less_than_this-1)+" "+target_modstr+" "+unscored_mod_str
print "running command... ",command
#processes.append(subprocess.Popen(command.split()))
#proc=subprocess.Popen(command.split())
#proc.wait()
############subprocess.call(command.split(), stderr=sys.stdout.fileno()) #### SEEMED TO WORK ONCE UP ON A TIME....
#processes.append(subprocess.Popen(command.split(),stdout=fnull, stderr=fnull)) # These pipes will hide the output from luciphor... this is only for DEBUG DEBUG DEBUG purposes...
#out, err = p.communicate()
p=os.system(command)
#print "Luciphor exited with a status of ",str(os.system(command))
time.sleep(1)
print "Finished scoring this command...!"
time.sleep(1)
#for each_proc in processes:
# each_proc.wait()
###### INPUT-FILE CLEANUP ######
#This gets rid of the mzML files...
print "Beginning file cleanup..."
os.chdir(basedir)
for eachrun in run_to_group:
os.remove(options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
### #thislink = os.readlink(eachrun)
### os.unlink(options.operation_folder+run_to_group[eachrun]+".pin_out/crux-output/"+eachrun)
##### READ IN OUTPUT FILES #####
output_results['file_idx']=output_results['file_idx'].astype(str)
output_results['file']=output_results['file'].astype(str)
#def extractScanNum(x):
# return str(x['specId'].split(".")[1])
#def extractFileName(x):
# return str(x['specId'].split(".")[0])
group_to_results={}
print "reading results in..."
for eachgroup in tqdm(set(group_information['Fractionation Group ID String'])):
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
try:
rsmax_result=pandas.read_csv("output_"+str(eachgroup)+"_rsmax.txt",sep='\t')
#scan_extracted=rsmax_result['specId'].split(".")[1]
#scan_extracted=rsmax_result.apply(extractScanNum,axis=1)
#fname_extracted=rsmax_result.apply(extractFileName,axis=1)
rsmax_result['scan']=rsmax_result['scan'].astype(int)
#rsmax_result.assign(scan = lambda x: x['specId'].split(".")[1])
#for each,row in rsmax_result.iterrows():
# print each,row
#sys.exit(2)###################################################
group_to_results[eachgroup]=rsmax_result
except:
print "WARNING ::::: Couldn't find results for ",str(eachgroup)
group_to_runids={}
group_to_files={}
for index,row in group_information.iterrows():
if row['Fractionation Group ID String'] in group_to_runids:
group_to_runids[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
else:
#print row,"THIS IS ROW!2"
group_to_runids[row['Fractionation Group ID String']]=[str(row['Crux File Integer'])]
if row['Fractionation Group ID String'] in group_to_files:
group_to_files[row['Fractionation Group ID String']].append(str(row['Original File Name']))
else:
#print row,"THIS IS ROW!2"
group_to_files[row['Fractionation Group ID String']]=[str(row['Original File Name'])]
##### We'll read in the raw inputs, and then append extra PhosphoRS information to them! #####
groups_output={}
for each_group in group_to_files:
groups_output[each_group]=output_results[(output_results['file'].str.contains('|'.join(group_to_files[each_group])))]
def replaceMods(input):
modDict=target_mass_conversion
#print "input",input
output=input
#print modDict,"This is modDict!"
for eachmod in modDict:
if str(eachmod) in output and str(modDict[eachmod]) not in output:
if float(eachmod)>0.0:
output=output.replace(str(eachmod),"+"+str(modDict[eachmod]))
else:
output=output.replace(str(eachmod),str(modDict[eachmod]))
#print "output",output
return output
##### We'll use this dict to convert fileidx to file names
fileidx_to_file={}
for index,row in group_information.iterrows():
thisrun = row['Original File Name']
thisidx = str(row['Crux File Integer'])
fileidx_to_file[thisidx]=thisrun
#modsite_to_modmass={}
#modsite_to_modmass_fixed={}
#modsite_to_modmass_variable={}
#print options.target_mods,"target mods!"
#print options.variable_mods,"unscored mods!"
#for eachmod in options.target_mods.split():
# modsite_to_modmass[eachmod[:1]]=non_decimal.sub('',eachmod)
#tmod_aa=options.target_mods.split()[0]
#tmod_mass=options.target_mods.split()[1]
#for each_aa in tmod_aa:
# modsite_to_modmass[each_aa]=tmod_mass
#for eachmod in options.variable_mods.split(","):
# mod_aa=eachmod.split()[0]
# mod_mass=eachmod.split()[1]
# for each_aa in mod_aa:
# modsite_to_modmass[each_aa]=mod_mass
#print "This is the modsite dict!",modsite_to_modmass
#for eachmod in options.fixed_mods.split(","):
# modsite_to_modmass_fixed[eachmod[:1]]=non_decimal.sub('',eachmod)
##### unscored_mod_dict={}#key is mass(as str), value is AAs (as str) #This exists here in the pRS implementation!
#for eachmod in options.variable_mods.split(","): ###### NB: At the moment, I'm not sure how LuciPhor handles these...
# modsite_to_modmass_variable[eachmod[:1]]=non_decimal.sub('',eachmod)###### So I'll make this for later, but I don't know what to do with it... yet...
#####
#####
##### We'll have to cram the phosphoRS results into the appropriate files... and modify the outputs!
modified_output={}
#### DEBUG LOGGING:
#logwriter=open(options.operation_folder+"luci_log.txt",'wb')
##########################################NEWNEWNEWNEWNEWNEWNEWNEW
#def combineFileNameAndScanLuci(x):
# return str(x['specId'].split(".")[0]+".mzML."+x['specId'].split(".")[1])
def combineFileNameAndScanPerco(x):
return str(x['file']+"."+str(x['scan']))
newlist=[]
for eachgroup in groups_output:
newlist.append(groups_output[eachgroup])
combined_groups=pandas.concat(newlist)#This is the combined percolator results...
#combined_fname_scans=combined_groups['mzML']+"."+str(combined_groups['scan'])
print "Generating unique names..."
combined_fname_scans=combined_groups.apply(combineFileNameAndScanPerco,axis=1)
combined_groups['unique_name']=combined_fname_scans
del newlist
newlist=[]
for eachgroup in group_to_results:
try:
newlist.append(group_to_results[eachgroup])
except:
print "We didn't find a result file from ",eachgroup
combined_luci_groups=pandas.concat(newlist)#This is the combined results
#combined_fname_scans_luci=combined_luci_groups.apply(combineFileNameAndScanLuci,axis=1)
combined_fname_scans_luci=combined_luci_groups['mzML']+"."+combined_luci_groups['scan'].astype(str)
#print combined_fname_scans_luci
combined_luci_groups['unique_name']=combined_fname_scans_luci
print "column names are"
combined_luci_groups.rename(columns={"sequence":"unmodified sequence","sequenceProbability":"ptmRS_peptideLocalizationProbability","ptmRSscore":"ptmRS_score","all_numMods":"ptmRS_numMods"},inplace=True)
print combined_luci_groups.columns.values.tolist()
del newlist
combined_luci_groups=combined_luci_groups.drop('scan',1)
combined_luci_groups=combined_luci_groups.drop('mzML',1)
#These are global so that they will make their way into the joblib Parallel instances...
global numMod_list
global siteProbabilities_list
numMod_list=[]
siteProbabilities_list=[]
for each in combined_luci_groups.columns.values.tolist():
if "_numMods" in each:
numMod_list.append(each)
elif "_siteProbabilities" in each:
siteProbabilities_list.append(each)
print combined_luci_groups.columns,"These are the ptmRS group columns..."
merged_df=pandas.merge(combined_groups,combined_luci_groups, on="unique_name",sort=True,how="outer",indicator=False) #CHANGED BACK TO OUTER BECAUSE WE WERE THROWING AWAY UNMOD PEPTIDES! (3/2/16)
#merged_df=combined_groups.merge(combined_luci_groups, on="unique_name",sort=True,how="inner",indicator=False) #CHANGED TO INNER 2/22/2016
#print merged_df.columns.values.tolist(),"fwd merged cols"
#print merged_df.columns,"columns, too... jsut after..."
#firestick
#reverse_merged_df=pandas.merge(combined_luci_groups,combined_groups, on="unique_name",sort=True,how="outer",indicator=False)
#print reverse_merged_df.columns.values.tolist(),"rev merged cols"
merged_df['ptmRS_numMods'].fillna(0,inplace=True)
merged_df['ptmRS_numMods']=merged_df['ptmRS_numMods'].astype(int)
merged_df['ptmRS_sequence']=""
merged_df['ptmRS_numPPS']=0
#merged_df['numRPS'].fillna("0")
merged_df=merged_df.drop_duplicates(subset='unique_name')
#merge_numPPS_mask=merged_df[merged_df['numPPS']>=1]
#print target_mass_conversion,"this is the mass conv dict"
merged_df['ptmRS_sequence']=merged_df['sequence']
#filtered_frame=merged_df[merged_df['sequence'].str.contains(']')]
#print filtered_frame,"filtered......!!!!!!!!!!"
print "Replacing mod positions from ptmRS outputs... In Parallel!"
#################### THIS WHOLE LOOP COULD PROBABLY BE PARALLELIZED WITH A GROUPBY....
#print "length before...",str(len(merged_df.index))
grouped_df=merged_df.groupby(np.arange(len(merged_df))//cpu_count())
####print merged_df,type(merged_df)
####merged_df = merged_df.apply(fix_modification_parallel,axis=0)
#print merged_df.columns,"columns, too... BEFORE PARALLEL"
merged_df['ptmRS_totalNumPPS']=np.nan
merged_df=applyParallel(grouped_df,fix_modification_parallel)
print merged_df.columns,"columns, too... AFTER PARALLEL"
#print "length after...",str(len(merged_df.index))
#print merged_df
merged_df=merged_df.drop('unique_name',1)
def stripMods(x):
return re.sub("[\(\[].*?[\)\]]", "", x['sequence'])
#grouped_df2=merged_df.groupby(np.arange(len(merged_df))//cpu_count())
merged_df['unmodified sequence']=merged_df.apply(stripMods,axis=1)
#merged_df=applyParallel(grouped_df2,stripMods)
if "unmodified sequence_x" in merged_df.columns.tolist():
#merged_df['unmodified sequence']=merged_df['unmodified sequence_x']+merged_df['unmodified sequence_y']
merged_df=merged_df.drop('unmodified sequence_x',1)
merged_df=merged_df.drop('unmodified sequence_y',1)
#merged_df.rename(columns={"unmodified sequence_y":"unmodified sequence"},inplace=True)
#merged_df['unmodified sequence']=merged_df['sequence']
#print merged_df,"THIS IS THE MERGED DATAFRAME AFTER PROCESSING....."
print merged_df.columns,"columns, too... END"
for each_idx in set(merged_df['file_idx']):
modified_output[each_idx]=merged_df[merged_df['file_idx']==str(each_idx)]
###########################################################################
idx_to_group={}
for index,row in group_information.iterrows(): #['Original File Name']:
thisidx = str(row['Crux File Integer'])
thisgroup = row['Fractionation Group ID String']
idx_to_group[thisidx]=thisgroup
assembled_output={}
for eachkey in modified_output:
group=idx_to_group[eachkey]
if group in assembled_output:
assembled_output[group].append(modified_output[eachkey])
else:
assembled_output[group]=[modified_output[eachkey]]
#print assembled_output.keys(),"these are the keys"
###### Now we'll go ahead and write out each group to a file in the approp. location!
for eachgroup in assembled_output:
os.chdir(basedir)
os.chdir(options.operation_folder+eachgroup+".pin_out/crux-output/")
groupdf=pandas.concat(assembled_output[eachgroup])
shutil.move(eachgroup+".percolator.target.psms.txt",eachgroup+".percolator.target.psms_uncorrected.txt")
groupdf.to_csv(eachgroup+".percolator.target.psms.txt",sep="\t",index=False)
print "-----------------------------------------------------------------------"
|
mit
|
gracecox/MagPy
|
magpysv/tests/test_io.py
|
1
|
10486
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 13:26:22 2016
Testing the file io functionality of io.py.
@author: Grace Cox and Will Brown
"""
import unittest
import mock
from ddt import ddt, data, unpack
from io import StringIO # io
import os
from .. import io # magpysv.io
from pandas.util.testing import assert_frame_equal
import pandas as pd
import datetime as dt
import numpy as np
# Directory where the test files are located
TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
@ddt
class WDCParsefileTestCase(unittest.TestCase):
"""Set up for test case for parsing function (opens test datafiles)"""
@data({'filename': 'testdata1.wdc', 'code': 'NGK', 'component1': 'D',
'component2': 'Z'},
{'filename': 'testdata2.wdc', 'code': 'NGK', 'component1': 'D',
'component2': 'Z'},
{'filename': 'testdata3.wdc', 'code': 'PSM', 'component1': 'H',
'component2': 'D'})
@unpack
def test_wdc_parsefile_newformat(self, filename, code, component1,
component2):
""""Verify correct parsing of the test WDC format files"""
testfile = os.path.join(TEST_DATA_PATH, filename)
data = io.wdc_parsefile(testfile)
# Observatory code
self.assertEqual(data.code[0], code)
self.assertEqual(len(data.code.unique()), 1)
# Components
self.assertTrue(any(
x in {'X', 'Y', 'Z', 'D', 'I', 'H'} for x in data.component))
self.assertEqual(data.component[3], component1)
self.assertEqual(data.component.values[-1], component2)
class WDCDatetimesTestCase(unittest.TestCase):
"""Set up the test case for datetimes function"""
def setUp(self):
"""Create a dataframe containing mock input for datetime function"""
self.data = pd.DataFrame(
index=[0], columns=['century', 'yr', 'month', 'day', 'hour'])
self.data['century'] = 19
self.data['yr'] = 88
self.data['month'] = 9
self.data['day'] = 21
self.data['hour'] = 2
self.data['code'] = 'ESK'
def test_wdc_datetimes(self):
"""Verify correct datetime creation from mock data"""
df = io.wdc_datetimes(self.data)
self.assertTrue(isinstance(df.date[0], pd.datetime))
self.assertEqual(df.date[0], dt.datetime(day=21, month=9, year=1988,
hour=2, minute=30))
class HourlyMeanConversionTestCase(unittest.TestCase):
"""Set up the test case for hourly means conversion function"""
def setUp(self):
"""Create a dataframe containing mock input"""
self.data = pd.DataFrame(
index=[0, 1], columns=[
'date', 'component', 'base', 'hourly_mean_temp'])
self.data.component = ['I', 'X']
self.data.base = [53, 200]
self.data.hourly_mean_temp = [1200, 530]
def test_hourly_mean_conversion(self):
"""Verify correct conversion to hourly means from mock data"""
df = io.hourly_mean_conversion(self.data)
self.assertAlmostEqual(df.iloc[0].hourly_mean, 55)
self.assertAlmostEqual(df.iloc[1].hourly_mean, 20530)
class AnglesToGeographicTestCase(unittest.TestCase):
"""Set up the test case for conversion from D and H to X and Y"""
def setUp(self):
"""Create a dataframe containing mock input"""
self.data = pd.DataFrame(
index=[0, 1], columns=[
'date', 'component', 'daily_mean'])
self.data.component = ['H', 'D']
self.data.daily_mean = [20530, 55]
self.data.date = [dt.date(day=15, month=1, year=1963), dt.date(day=15,
month=1, year=1963)]
self.data = self.data.pivot(index='date', columns='component',
values='daily_mean')
def test_angles_to_geographic(self):
"""Verify correct conversion from mock data"""
df = io.angles_to_geographic(self.data)
self.assertAlmostEqual(df.iloc[0].X, 11775.524238286978)
self.assertAlmostEqual(df.iloc[0].Y, 16817.191469253001)
class WDCXYZTestCase(unittest.TestCase):
"""Set up the test case for conversion to X, Y and Z components"""
def setUp(self):
"""Create a dataframe containing mock input"""
self.data = pd.DataFrame(
index=[0, 1, 2, 3, 4, 5], columns=[
'date', 'component', 'base', 'hourly_mean_temp'])
self.data.component = ['H', 'D', 'X', 'Y', 'Z', 'X']
self.data.base = [200, 53, np.nan, np.nan, 300, 9999]
self.data.hourly_mean_temp = [530, 1200, np.nan, np.nan, 430, 9999]
self.data.date.iloc[0:5] = dt.date(day=15, month=1, year=1963)
self.data.date.iloc[5] = dt.date(day=20, month=1, year=1963)
def test_wdc_xyz(self):
"""Verify correct conversion to X, Y and Z components from mock data"""
df = io.wdc_xyz(self.data)
self.assertAlmostEqual(df.iloc[0].X, 11775.524238286978)
self.assertAlmostEqual(df.iloc[0].Y, 16817.191469253001)
self.assertAlmostEqual(df.iloc[0].Z, 30430.000000000000)
self.assertTrue(np.isnan(df.iloc[1].X))
def test_wdc_xyz_is_nan_if_Z_missing(self):
"""Ensure missing Z data are set to NaN"""
self.data = self.data[self.data.component != 'Z']
df = io.wdc_xyz(self.data)
self.assertTrue(np.isnan(df.iloc[1].Z))
def test_wdc_xyz_is_nan_if_DHXY_missing(self):
"""Ensure missing X, Y, D or H data are set to NaN"""
self.data = self.data[~(self.data.component.isin(['D', 'H', 'X',
'Y']))]
df = io.wdc_xyz(self.data)
self.assertTrue(np.isnan(df.iloc[0].X))
self.assertTrue(np.isnan(df.iloc[0].Y))
class WDCReadTestCase(unittest.TestCase):
"""Set up the test case for reading WDC files"""
def setUp(self):
"""Create a dataframe containing mock input and specify test file"""
self.data = pd.DataFrame(columns=['date', 'X', 'Y', 'Z'])
self.data.columns.name = 'component'
self.data.date = pd.date_range('1883-01-01 00:30:00', freq='H',
periods=5)
self.data.X = [np.nan, 18656.736886, 18657.537749, 18660.729592,
18658.976990]
self.data.Y = [np.nan, -5487.438180, -5491.801722, -5480.946278,
-5493.994785]
self.data.Z = [np.nan, np.nan, np.nan, np.nan, np.nan]
self.filename = os.path.join(TEST_DATA_PATH, 'testdata3.wdc')
def test_wdc_readfile(self):
"""Verify correct reading of test file by comparing with mock data"""
df = io.wdc_readfile(self.filename)
assert_frame_equal(df.head(), self.data)
class WDCAppendTestCase(unittest.TestCase):
"""Set up the test case for appending WDC data"""
def setUp(self):
"""Create a dataframe containing mock input and specify test file"""
# the type matches when this is a single value range...
self.value1 = [pd.date_range('1911-1-1 0:30', '1911-1-1 0:30')]
self.value2 = [45294.0]
self.dimensions = (1416, 4)
self.filename = 'testappenddata'
def test_append_wdc_data(self):
"""Verify correct append of test file by comparing with mock data"""
df = io.append_wdc_data(obs_name=self.filename, path=TEST_DATA_PATH)
self.assertEqual(self.dimensions, df.shape)
self.assertEqual(self.value1, df['date'].head(1).values)
self.assertAlmostEqual(self.value2, df['Z'].tail(1).values)
class WDCHourlyToCSVTestCase(unittest.TestCase):
"""
Mock the calls I don't want to actually make or are tested elsewhere, and
beware that patches are applied in _reverse_ order, because whoever wrote
mock is the devil
"""
def setUp(self):
"""Specify mock filenames and paths"""
self.wdc_path = 'data'
self.write_dir = './a-test-path/to-nowhere'
self.obs_list = ['ESK']
self.print_obs = True
self.wdc_data = pd.DataFrame()
@mock.patch('magpysv.io.write_csv_data')
@mock.patch('magpysv.io.append_wdc_data')
@mock.patch('os.makedirs')
@mock.patch('os.path.exists', return_value=False)
def test_wdc_to_hourly_csv_path_exists(self, mock_exists, mock_makedirs,
mock_append_wdc_data,
mock_write_csv_data):
"""Check is paths/files exist"""
io.wdc_to_hourly_csv(wdc_path=self.wdc_path,
write_dir=self.write_dir,
obs_list=self.obs_list,
print_obs=self.print_obs)
assert mock_makedirs.call_count == 1
mock_makedirs.assert_called_with(self.write_dir)
@mock.patch('magpysv.io.write_csv_data')
@mock.patch('magpysv.io.append_wdc_data')
@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('os.path.exists', return_value=True)
def test_wdc_to_hourly_csv_call_print(self, mock_exists, mock_print,
mock_append_wdc_data,
mock_write_csv_data):
"""Check if print function called"""
io.wdc_to_hourly_csv(wdc_path=self.wdc_path,
write_dir=self.write_dir,
obs_list=self.obs_list,
print_obs=self.print_obs)
self.assertEqual(self.obs_list[0] + '\n', mock_print.getvalue())
@mock.patch('magpysv.io.write_csv_data')
@mock.patch('magpysv.io.append_wdc_data')
@mock.patch('os.path.exists', return_value=True)
def test_wdc_to_hourly_csv_call_write(self, mock_exists,
mock_append_wdc_data,
mock_write_csv_data):
"""Check if write function called"""
mock_append_wdc_data.return_value = self.wdc_data
io.wdc_to_hourly_csv(wdc_path=self.wdc_path,
write_dir=self.write_dir,
obs_list=self.obs_list,
print_obs=self.print_obs)
mock_write_csv_data.assert_called_with(data=self.wdc_data,
write_dir=self.write_dir,
obs_name=self.obs_list[0])
|
gpl-3.0
|
3DGenomes/tadbit
|
_pytadbit/modelling/impoptimizer.py
|
1
|
37038
|
"""
28 Aug 2013
"""
from pytadbit.modelling.imp_modelling import generate_3d_models
from pytadbit.utils.extraviews import plot_2d_optimization_result
from pytadbit.utils.extraviews import plot_3d_optimization_result
from pytadbit.modelling.structuralmodels import StructuralModels
from cPickle import dump, load
from sys import stderr
import itertools
import numpy as np
import multiprocessing as mu
class IMPoptimizer(object):
"""
This class optimizes a set of parameters (scale, kbending, maxdist, lowfreq, and
upfreq) in order to maximize the correlation between the contact matrix computed on
    the generated models (generated by IMP or lammps) and the input contact map.
:param experiment: an instance of the class pytadbit.experiment.Experiment
:param start: first bin to model (bin number, inclusive and starting at 1)
:param end: last bin to model (bin number)
    :param 500 n_models: number of models to generate
    :param 100 n_keep: number of models to use in the final analysis (usually
the top 20% of the generated models). The models are ranked according to
their objective function value (the lower the better)
:param 1 close_bins: number of particles away (i.e. the bin number difference) a
particle pair must be in order to be considered as neighbors (e.g. 1 means
nearest neighbors particles)
:param None cutoff: distance cutoff (nm) to define whether two particles
are in contact or not in the models, default is 2.0 * resolution * scale.
:param None container: restrains particle to be within a given object.
NOTE: The container can only be a 'cylinder' of a given height closed
by hemispheres. This cylinder is defined by its radius, its height (if
height=0 the container is a sphere), and the strength (k) of the harmonic
force applied in the restraint. For example, to model the confinement
in E. coli (2 micrometers of length, and 0.5 micrometer of width),
container = ['cylinder', 250, 1500, 50] should be used, and
in a typical spherical mammalian nuclei (about 6 micrometers of diameter),
container = ['cylinder', 3000, 0, 50]
"""
def __init__(self, experiment, start, end, n_models=500,
n_keep=100, close_bins=1, container=None,
single_particle_restraints=None):
(self.zscores,
self.values, zeros) = experiment._sub_experiment_zscore(start, end)
self.resolution = experiment.resolution
self.zeros = tuple([i not in zeros for i in xrange(end - start + 1)])
self.nloci = end - start + 1
if not self.nloci == len(self.zeros):
raise Exception('ERROR: in optimization, bad number of particles\n')
self.n_models = n_models
self.n_keep = n_keep
self.close_bins = close_bins
self.chromosomes = None
if experiment.hic_data and experiment.hic_data[0].chromosomes:
self.chromosomes = experiment.hic_data[0].chromosomes
self.coords = []
tot = 0
chrs = []
chrom_offset_start = 1
chrom_offset_end = 0
for k, v in experiment.hic_data[0].chromosomes.iteritems():
tot += v
if start > tot:
chrom_offset_start = start - tot
if end <= tot:
chrom_offset_end = tot - end
chrs.append(k)
break
if start < tot and end >= tot:
chrs.append(k)
for k in chrs:
self.coords.append({'crm' : k,
'start': 1,
'end' : experiment.hic_data[0].chromosomes[k]})
self.coords[0]['start'] = chrom_offset_start
self.coords[-1]['end'] -= chrom_offset_end
else:
self.coords = {'crm' : experiment.crm.name,
'start': start,
'end' : end}
self.single_particle_restraints = single_particle_restraints
        # For clarity, the order in which the optimized parameters are managed
        # should always be the same: scale, kbending, maxdist, lowfreq, upfreq
self.scale_range = []
self.kbending_range = []
self.maxdist_range = []
self.lowfreq_range = []
self.upfreq_range = []
self.dcutoff_range = []
self.container = container
self.results = {}
def run_grid_search(self,
scale_range=0.01,
kbending_range=0.0, # TODO: Choose values of kbending that should be explored by default!!!
maxdist_range=(400, 1500, 100),
lowfreq_range=(-1, 0, 0.1),
upfreq_range=(0, 1, 0.1),
dcutoff_range=2,
corr='spearman', off_diag=1,
savedata=None, n_cpus=1, verbose=True,
use_HiC=True, use_confining_environment=True,
use_excluded_volume=True):
"""
This function calculates the correlation between the models generated
        by IMP and the input data for the five main IMP parameters (scale,
kbending, maxdist, lowfreq and upfreq) in the given ranges of values.
The range can be expressed as a list.
:param n_cpus: number of CPUs to use
:param 0.01 scale_range: upper and lower bounds used to search for
the optimal scale parameter (unit nm per nucleotide). The last value of
the input tuple is the incremental step for scale parameter values
:param (0,2.0,0.5) kbending_range: values of the bending rigidity
strength to enforce in the models
        :param (400,1500,100) maxdist_range: upper and lower bounds
used to search for the optimal maximum experimental distance.
The last value of the input tuple is the incremental step for maxdist
values
:param (-1,0,0.1) lowfreq_range: range of lowfreq values to be
optimized. The last value of the input tuple is the incremental
step for the lowfreq values. To be precise "freq" refers to the
Z-score.
:param (0,1,0.1) upfreq_range: range of upfreq values to be optimized.
The last value of the input tuple is the incremental step for the
upfreq values. To be precise "freq" refers to the Z-score.
:param 2 dcutoff_range: upper and lower bounds used to search for
the optimal distance cutoff parameter (distance, in number of beads,
from which to consider 2 beads as being close). The last value of the
           input tuple is the incremental step for dcutoff values.
:param None savedata: concatenate all generated models into a dictionary
and save it into a file named by this argument
:param True verbose: print the results to the standard output
"""
if verbose:
stderr.write('Optimizing %s particles\n' % self.nloci)
        # These commands transform the ranges given as input (either tuples or single
        # values) into lists of values to use in the grid search for the best parameters
# scale
if isinstance(scale_range, tuple):
scale_step = scale_range[2]
scale_arange = np.arange(scale_range[0],
scale_range[1] + scale_step / 2,
scale_step)
else:
if isinstance(scale_range, (float, int)):
scale_range = [scale_range]
scale_arange = scale_range
# kbending
if isinstance(kbending_range, tuple):
kbending_step = kbending_range[2]
kbending_arange = np.arange(kbending_range[0],
kbending_range[1] + kbending_step / 2,
kbending_step)
else:
if isinstance(kbending_range, (float, int)):
kbending_range = [kbending_range]
kbending_arange = kbending_range
# maxdist
if isinstance(maxdist_range, tuple):
maxdist_step = maxdist_range[2]
maxdist_arange = np.arange(maxdist_range[0],
maxdist_range[1] + maxdist_step,
maxdist_step)
else:
if isinstance(maxdist_range, (float, int)):
maxdist_range = [maxdist_range]
maxdist_arange = maxdist_range
# lowfreq
if isinstance(lowfreq_range, tuple):
lowfreq_step = lowfreq_range[2]
lowfreq_arange = np.arange(lowfreq_range[0],
lowfreq_range[1] + lowfreq_step / 2,
lowfreq_step)
else:
if isinstance(lowfreq_range, (float, int)):
lowfreq_range = [lowfreq_range]
lowfreq_arange = lowfreq_range
# upfreq
if isinstance(upfreq_range, tuple):
upfreq_step = upfreq_range[2]
upfreq_arange = np.arange(upfreq_range[0],
upfreq_range[1] + upfreq_step / 2,
upfreq_step)
else:
if isinstance(upfreq_range, (float, int)):
upfreq_range = [upfreq_range]
upfreq_arange = upfreq_range
# dcutoff
if isinstance(dcutoff_range, tuple):
dcutoff_step = dcutoff_range[2]
dcutoff_arange = np.arange(dcutoff_range[0],
dcutoff_range[1] + dcutoff_step / 2,
dcutoff_step)
else:
if isinstance(dcutoff_range, (float, int)):
dcutoff_range = [dcutoff_range]
dcutoff_arange = dcutoff_range
# These commands round all the values in the ranges defined as input
# scale
if not self.scale_range:
self.scale_range = [my_round(i) for i in scale_arange ]
else:
self.scale_range = sorted([my_round(i) for i in scale_arange
if not my_round(i) in self.scale_range] +
self.scale_range)
# scale
if not self.kbending_range:
self.kbending_range = [my_round(i) for i in kbending_arange]
else:
self.kbending_range = sorted([my_round(i) for i in kbending_arange
if not my_round(i) in self.kbending_range] +
self.kbending_range)
# maxdist
if not self.maxdist_range:
self.maxdist_range = [my_round(i) for i in maxdist_arange]
else:
self.maxdist_range = sorted([my_round(i) for i in maxdist_arange
if not my_round(i) in self.maxdist_range] +
self.maxdist_range)
# lowfreq
if not self.lowfreq_range:
self.lowfreq_range = [my_round(i) for i in lowfreq_arange]
else:
self.lowfreq_range = sorted([my_round(i) for i in lowfreq_arange
if not my_round(i) in self.lowfreq_range] +
self.lowfreq_range)
# upfreq
if not self.upfreq_range:
self.upfreq_range = [my_round(i) for i in upfreq_arange ]
else:
self.upfreq_range = sorted([my_round(i) for i in upfreq_arange
if not my_round(i) in self.upfreq_range] +
self.upfreq_range)
# dcutoff
if not self.dcutoff_range:
self.dcutoff_range = [my_round(i) for i in dcutoff_arange]
else:
self.dcutoff_range = sorted([my_round(i) for i in dcutoff_arange
if not my_round(i) in self.dcutoff_range] +
self.dcutoff_range)
# These commands perform the grid search of the best parameters
models = {}
count = 0
if verbose:
stderr.write(' %-4s%-5s\t%-8s\t%-7s\t%-7s\t%-6s\t%-7s\t%-11s\n' % (
"num","scale","kbending","maxdist","lowfreq","upfreq","dcutoff","correlation"))
parameters_sets = itertools.product([my_round(i) for i in scale_arange ],
[my_round(i) for i in kbending_arange],
[my_round(i) for i in maxdist_arange ],
[my_round(i) for i in lowfreq_arange ],
[my_round(i) for i in upfreq_arange ])
#for (scale, maxdist, upfreq, lowfreq, kbending) in zip([my_round(i) for i in scale_arange ],
for (scale, kbending, maxdist, lowfreq, upfreq) in parameters_sets:
#print (scale, kbending, maxdist, lowfreq, upfreq)
# This check whether this optimization has been already done for this set of parameters
if (scale, kbending, maxdist, lowfreq, upfreq) in [tuple(k[:5]) for k in self.results]:
k = [k for k in self.results
if (scale, kbending, maxdist, lowfreq, upfreq) == tuple(k[:5])
][0]
result = self.results[(scale, kbending, maxdist, lowfreq, upfreq, k[-1])]
if verbose:
verb = ' %-5s\t%-5s\t%-8s\t%-7s\t%-7s\t%-6s\t%-7s\t' % (
'xx', scale, kbending, maxdist, lowfreq, upfreq, k[-1])
if verbose == 2:
stderr.write(verb + str(round(result, 4)) + '\n')
else:
print verb + str(round(result, 4))
continue
config_tmp = {'kforce' : 5,
'scale' : float(scale),
'kbending' : float(kbending),
'lowrdist' : 100, # This parameters is fixed to XXX
'maxdist' : float(maxdist),
'lowfreq' : float(lowfreq),
'upfreq' : float(upfreq)}
try:
count += 1
tdm = generate_3d_models(
self.zscores, self.resolution,
self.nloci, n_models=self.n_models,
n_keep=self.n_keep, config=config_tmp,
n_cpus=n_cpus, first=0,
values=self.values, container=self.container,
coords = self.coords, close_bins=self.close_bins,
zeros=self.zeros, use_HiC=use_HiC,
use_confining_environment=use_confining_environment,
use_excluded_volume=use_excluded_volume,
single_particle_restraints=self.single_particle_restraints)
result = 0
cutoff = my_round(dcutoff_arange[0])
matrices = tdm.get_contact_matrix(
cutoff=[i * self.resolution * float(scale) for i in dcutoff_arange])
for m in matrices:
cut = m**0.5
result = tdm.correlate_with_real_data(cutoff=cut, corr=corr,
off_diag=off_diag,
contact_matrix=matrices[m])[0]
cutoff = my_round(float(cut) / self.resolution / float(scale))
if verbose:
verb = ' %-4s%-5s\t%-8s\t%-7s\t%-7s\t%-6s\t%-7s' % (
count, scale, kbending, maxdist, lowfreq, upfreq, cutoff)
if verbose == 2:
stderr.write(verb + str(round(result, 4)) + '\n')
else:
print verb + str(round(result, 4))
# Store the correlation for the TADbit parameters set
self.results[(scale, kbending, maxdist, lowfreq, upfreq, cutoff)] = result
except Exception, e:
print ' SKIPPING: %s' % e
result = 0
cutoff = my_round(dcutoff_arange[0])
if savedata and result:
models[(scale, kbending, maxdist, lowfreq, upfreq, cutoff)] = \
tdm._reduce_models(minimal=["restraints", "zscores", "original_data"])
if savedata:
out = open(savedata, 'w')
dump(models, out)
out.close()
self.kbending_range.sort( key=float)
self.scale_range.sort( key=float)
self.maxdist_range.sort(key=float)
self.lowfreq_range.sort(key=float)
self.upfreq_range.sort( key=float)
self.dcutoff_range.sort(key=float)
def load_grid_search_OLD(self, filenames, corr='spearman', off_diag=1,
verbose=True, n_cpus=1):
"""
Loads one file or a list of files containing pre-calculated Structural
Models (keep_models parameter used). And correlate each set of models
with real data. Useful to run different correlation on the same data
avoiding to re-calculate each time the models.
:param filenames: either a path to a file or a list of paths.
:param spearman corr: correlation coefficient to use
        :param 1 off_diag:
:param True verbose: print the results to the standard output
"""
if isinstance(filenames, str):
filenames = [filenames]
models = {}
for filename in filenames:
inf = open(filename)
models.update(load(inf))
inf.close()
count = 0
pool = mu.Pool(n_cpus, maxtasksperchild=1)
jobs = {}
for scale, maxdist, upfreq, lowfreq, dcutoff in models:
svd = models[(scale, maxdist, upfreq, lowfreq, dcutoff)]
jobs[(scale, maxdist, upfreq, lowfreq, dcutoff)] = pool.apply_async(
_mu_correlate, args=(svd, corr, off_diag,
scale, maxdist, upfreq, lowfreq, dcutoff,
verbose, count))
count += 1
pool.close()
pool.join()
for scale, maxdist, upfreq, lowfreq, dcutoff in models:
self.results[(scale, maxdist, upfreq, lowfreq, dcutoff)] = \
jobs[(scale, maxdist, upfreq, lowfreq, dcutoff)].get()
if not scale in self.scale_range:
self.scale_range.append(scale)
if not maxdist in self.maxdist_range:
self.maxdist_range.append(maxdist)
if not lowfreq in self.lowfreq_range:
self.lowfreq_range.append(lowfreq)
if not upfreq in self.upfreq_range:
self.upfreq_range.append(upfreq)
if not dcutoff in self.dcutoff_range:
self.dcutoff_range.append(dcutoff)
self.scale_range.sort( key=float)
self.maxdist_range.sort(key=float)
self.lowfreq_range.sort(key=float)
self.upfreq_range.sort( key=float)
self.dcutoff_range.sort(key=float)
def load_grid_search(self, filenames, corr='spearman', off_diag=1,
verbose=True, n_cpus=1):
"""
Loads one file or a list of files containing pre-calculated Structural
Models (keep_models parameter used). And correlate each set of models
with real data. Useful to run different correlation on the same data
avoiding to re-calculate each time the models.
:param filenames: either a path to a file or a list of paths.
:param spearman corr: correlation coefficient to use
        :param 1 off_diag:
:param True verbose: print the results to the standard output
"""
if isinstance(filenames, str):
filenames = [filenames]
models = {}
for filename in filenames:
inf = open(filename)
models.update(load(inf))
inf.close()
count = 0
pool = mu.Pool(n_cpus, maxtasksperchild=1)
jobs = {}
for scale, kbending, maxdist, lowfreq, upfreq, dcutoff in models:
svd = models[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)]
jobs[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)] = pool.apply_async(
_mu_correlate, args=(svd, corr, off_diag,
scale, kbending, maxdist, lowfreq, upfreq, dcutoff,
verbose, count))
count += 1
pool.close()
pool.join()
for scale, kbending, maxdist, lowfreq, upfreq, dcutoff in models:
self.results[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)] = \
jobs[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)].get()
if not scale in self.scale_range:
self.scale_range.append(scale)
if not kbending in self.kbending_range:
self.kbending_range.append(kbending)
if not maxdist in self.maxdist_range:
self.maxdist_range.append(maxdist)
if not lowfreq in self.lowfreq_range:
self.lowfreq_range.append(lowfreq)
if not upfreq in self.upfreq_range:
self.upfreq_range.append(upfreq)
if not dcutoff in self.dcutoff_range:
self.dcutoff_range.append(dcutoff)
self.scale_range.sort( key=float)
self.kbending_range.sort(key=float)
self.maxdist_range.sort(key=float)
self.lowfreq_range.sort(key=float)
self.upfreq_range.sort( key=float)
self.dcutoff_range.sort(key=float)
def get_best_parameters_dict(self, reference=None, with_corr=False):
"""
:param None reference: a description of the dataset optimized
:param False with_corr: if True, returns also the correlation value
:returns: a dict that can be used for modelling, see config parameter in
:func:`pytadbit.experiment.Experiment.model_region`
"""
if not self.results:
stderr.write('WARNING: no optimization done yet\n')
return
best = ((float('nan'), float('nan'), float('nan'), float('nan'), float('nan'), float('nan')), 0.0)
kbending = 0
        try:
            for (scale, kbending, maxdist, lowfreq, upfreq, cutoff), val in self.results.iteritems():
                if val > best[-1]:
                    best = ((scale, kbending, maxdist, lowfreq, upfreq, cutoff), val)
        except ValueError:
            # Old-format results without kbending in the key: rebuild the tuple in the
            # same order as above so the dict constructed below stays correct.
            for (scale, maxdist, upfreq, lowfreq, cutoff), val in self.results.iteritems():
                if val > best[-1]:
                    best = ((scale, kbending, maxdist, lowfreq, upfreq, cutoff), val)
if with_corr:
print best
return (dict((('scale' , float(best[0][0])),
('kbending' , float(best[0][1])),
('maxdist' , float(best[0][2])),
('lowfreq' , float(best[0][3])),
('upfreq' , float(best[0][4])),
('dcutoff' , float(best[0][5])),
('reference', reference or ''), ('kforce', 5))),
best[-1])
else:
return dict((('scale' , float(best[0][0])),
('kbending' , float(best[0][1])),
('maxdist' , float(best[0][2])),
('lowfreq' , float(best[0][3])),
('upfreq' , float(best[0][4])),
('dcutoff' , float(best[0][5])),
('reference', reference or ''), ('kforce', 5)))
def plot_2d(self, axes=('scale', 'kbending', 'maxdist', 'lowfreq', 'upfreq'),
show_best=0, skip=None, savefig=None,clim=None, cmap='inferno'):
"""
A grid of heatmaps representing the result of the optimization.
:param 'scale','kbending','maxdist','lowfreq','upfreq' axes: list of
axes to be represented in the plot. The order will define which
           parameter will be placed on the x, y, z or w axis.
:param 0 show_best: number of best correlation values to highlight in
the plot
:param None skip: if passed (as a dictionary), fix a given axe,
e.g.: {'scale': 0.001, 'maxdist': 500}
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:param None clim: color scale. If None, the max and min values of the input are used.
:param inferno cmap: matplotlib colormap
"""
results = self._result_to_array()
plot_2d_optimization_result((('scale', 'kbending', 'maxdist', 'lowfreq', 'upfreq'),
([float(i) for i in self.scale_range],
[float(i) for i in self.kbending_range],
[float(i) for i in self.maxdist_range],
[float(i) for i in self.lowfreq_range],
[float(i) for i in self.upfreq_range]),
results), dcutoff=self.dcutoff_range, axes=axes, show_best=show_best,
skip=skip, savefig=savefig,clim=clim, cmap=cmap)
def _result_to_array(self):
# This auxiliary method organizes the results of the grid optimization in a
# Numerical array to be passed to the plot_2d and plot_3d functions above
results = np.empty((len(self.scale_range), len(self.kbending_range), len(self.maxdist_range),
len(self.lowfreq_range), len(self.upfreq_range)))
"""
for i in xrange(len(self.scale_range)):
for j in xrange(len(self.kbending_range)):
for k in xrange(len(self.maxdist_range)):
for l in xrange(len(self.lowfreq_range)):
for m in xrange(len(self.upfreq_range)):
print "Correlation",self.scale_range[i],self.kbending_range[j],\
self.maxdist_range[k],self.lowfreq_range[l],self.upfreq_range[m],\
results[i][j][k][l][m]
"""
for v, scale in enumerate(self.scale_range):
for w, kbending in enumerate(self.kbending_range):
for x, maxdist in enumerate(self.maxdist_range):
for y, lowfreq in enumerate(self.lowfreq_range):
for z, upfreq in enumerate(self.upfreq_range):
# Case in which there is more than 1 distance cutoff (dcutoff)
try:
cut = [str(int(c)) for c in self.dcutoff_range
if (scale, kbending, maxdist, lowfreq, upfreq, str(int(c)))
in self.results][0]
except IndexError:
results[v, w, x, y, z] = float('nan')
continue
#
try:
results[v, w, x, y, z] = self.results[
(scale, kbending, maxdist, lowfreq, upfreq, cut)]
except KeyError:
results[v, w, x, y, z] = float('nan')
"""
for i in xrange(len(self.scale_range)):
for j in xrange(len(self.kbending_range)):
for k in xrange(len(self.maxdist_range)):
for l in xrange(len(self.lowfreq_range)):
for m in xrange(len(self.upfreq_range)):
print "Correlation",self.scale_range[i],self.kbending_range[j],\
self.maxdist_range[k],self.lowfreq_range[l],self.upfreq_range[m],\
results[i][j][k][l][m]
exit(1)
"""
return results
def write_result(self, f_name):
"""
This function writes a log file of all the values tested for each
parameter, and the resulting correlation value.
This file can be used to load or merge data a posteriori using
the function pytadbit.modelling.impoptimizer.IMPoptimizer.load_from_file
:param f_name: file name with the absolute path
"""
out = open(f_name, 'w')
out.write(('## n_models: %s n_keep: %s ' +
'close_bins: %s\n') % (self.n_models,
self.n_keep, self.close_bins))
out.write('# scale\tkbending\tmax_dist\tlow_freq\tup_freq\tdcutoff\tcorrelation\n')
parameters_sets = itertools.product(*[[my_round(i) for i in self.scale_range ],
[my_round(i) for i in self.kbending_range],
[my_round(i) for i in self.maxdist_range ],
[my_round(i) for i in self.lowfreq_range ],
[my_round(i) for i in self.upfreq_range ]])
for (scale, kbending, maxdist, lowfreq, upfreq) in parameters_sets:
try:
cut = sorted(
[c for c in self.dcutoff_range
if (scale, kbending, maxdist, lowfreq, upfreq, c)
in self.results],
key=lambda x: self.results[
(scale, kbending, maxdist, lowfreq, upfreq, x)])[0]
except IndexError:
print 'Missing dcutoff', (scale, kbending, maxdist, lowfreq, upfreq)
continue
try:
result = self.results[(scale, kbending, maxdist, lowfreq, upfreq, cut)]
out.write(' %-5s\t%-8s\t%-8s\t%-8s\t%-7s\t%-7s\t%-11s\n' % (
scale, kbending, maxdist, lowfreq, upfreq, cut, result))
except KeyError:
                print 'KeyError', (scale, kbending, maxdist, lowfreq, upfreq, cut)
continue
out.close()
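    # Usage sketch: the names `optim`, `other_optim` and the file name below
    # are illustrative only; both objects are assumed to be IMPoptimizer
    # instances created with matching n_models/n_keep/close_bins.
    #
    #     optim.write_result('grid_scan.log')
    #     other_optim.load_from_file('grid_scan.log')  # merge a second run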
def load_from_file_OLD(self, f_name):
"""
Loads the optimized parameters from a file generated with the function:
pytadbit.modelling.impoptimizer.IMPoptimizer.write_result.
This function does not overwrite the parameters that were already
loaded or calculated.
:param f_name: file name with the absolute path
"""
for line in open(f_name):
# Check same parameters
if line.startswith('##'):
n_models, _, n_keep, _, close_bins = line.split()[2:]
if ([int(n_models), int(n_keep), int(close_bins)]
!=
[self.n_models, self.n_keep, self.close_bins]):
                    raise Exception('Parameters in %s do not match: %s\n%s' % (
f_name,
[int(n_models), int(n_keep), int(close_bins)],
[self.n_models, self.n_keep, self.close_bins]))
if line.startswith('#'):
continue
# OLD format before May 2017 without kbending parameter
scale, maxdist, upfreq, lowfreq, dcutoff, result = line.split()
            # Set kbending to 0.0 to stay compatible with the new format
kbending = 0.0
scale, kbending, maxdist, lowfreq, upfreq, dcutoff = (
float(scale), float(kbending), int(maxdist), float(lowfreq), float(upfreq),
float(dcutoff))
scale = my_round(scale, val=5)
kbending = my_round(kbending)
maxdist = my_round(maxdist)
lowfreq = my_round(lowfreq)
upfreq = my_round(upfreq)
dcutoff = my_round(dcutoff)
self.results[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)] = float(result)
if not scale in self.scale_range:
self.scale_range.append(scale)
if not kbending in self.kbending_range:
self.kbending_range.append(kbending)
if not maxdist in self.maxdist_range:
self.maxdist_range.append(maxdist)
if not lowfreq in self.lowfreq_range:
self.lowfreq_range.append(lowfreq)
if not upfreq in self.upfreq_range:
self.upfreq_range.append(upfreq)
if not dcutoff in self.dcutoff_range:
self.dcutoff_range.append(dcutoff)
self.scale_range.sort( key=float)
self.kbending_range.sort(key=float)
self.maxdist_range.sort( key=float)
self.lowfreq_range.sort( key=float)
self.upfreq_range.sort( key=float)
self.dcutoff_range.sort( key=float)
def load_from_file(self, f_name):
"""
Loads the optimized parameters from a file generated with the function:
pytadbit.modelling.impoptimizer.IMPoptimizer.write_result.
This function does not overwrite the parameters that were already
loaded or calculated.
:param f_name: file name with the absolute path
"""
for line in open(f_name):
# Check same parameters
if line.startswith('##'):
n_models, _, n_keep, _, close_bins = line.split()[2:]
if ([int(n_models), int(n_keep), int(close_bins)]
!=
[self.n_models, self.n_keep, self.close_bins]):
                    raise Exception('Parameters in %s do not match: %s\n%s' % (
f_name,
[int(n_models), int(n_keep), int(close_bins)],
[self.n_models, self.n_keep, self.close_bins]))
if line.startswith('#'):
continue
scale, kbending, maxdist, lowfreq, upfreq, dcutoff, result = line.split()
scale, kbending, maxdist, lowfreq, upfreq, dcutoff = (
float(scale), float(kbending), float(maxdist), float(lowfreq), float(upfreq),
float(dcutoff))
scale = my_round(scale, val=5)
kbending = my_round(kbending)
maxdist = my_round(maxdist)
lowfreq = my_round(lowfreq)
upfreq = my_round(upfreq)
dcutoff = my_round(dcutoff)
self.results[(scale, kbending, maxdist, lowfreq, upfreq, dcutoff)] = float(result)
if not scale in self.scale_range:
self.scale_range.append(scale)
if not kbending in self.kbending_range:
self.kbending_range.append(kbending)
if not maxdist in self.maxdist_range:
self.maxdist_range.append(maxdist)
if not lowfreq in self.lowfreq_range:
self.lowfreq_range.append(lowfreq)
if not upfreq in self.upfreq_range:
self.upfreq_range.append(upfreq)
if not dcutoff in self.dcutoff_range:
self.dcutoff_range.append(dcutoff)
self.scale_range.sort( key=float)
self.kbending_range.sort(key=float)
self.maxdist_range.sort( key=float)
self.lowfreq_range.sort( key=float)
self.upfreq_range.sort( key=float)
self.dcutoff_range.sort( key=float)
def my_round(num, val=4):
num = round(float(num), val)
return str(int(num) if num == int(num) else num)
def _mu_correlate(svd, corr, off_diag, scale, kbending, maxdist, lowfreq, upfreq,
dcutoff, verbose, count):
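    # Rebuild a StructuralModels object from the serialized dictionary `svd`
    # and score it against the experimental matrix at the given distance
    # cutoff (dcutoff); the correlation method is selected by `corr`.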
tdm = StructuralModels(
nloci=svd['nloci'], models=svd['models'],
bad_models=svd['bad_models'],
resolution=svd['resolution'],
original_data=svd['original_data'],
clusters=svd['clusters'], config=svd['config'],
zscores=svd['zscore'])
try:
result = tdm.correlate_with_real_data(
cutoff=dcutoff, corr=corr,
off_diag=off_diag)[0]
if verbose:
verb = ' %-5s\t%-8s\t%-7s\t%-8s\t%-8s\t%-7s\n' % (
scale, kbending, maxdist, lowfreq, upfreq, dcutoff)
if verbose == 2:
stderr.write(verb + str(result) + '\n')
else:
print verb + str(result)
    except Exception, e:
        print 'ERROR %s' % e
        result = float('nan')
    return result
|
gpl-3.0
|
marcocaccin/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. We load one image at a time and randomly extract 50
patches from it. Once we have accumulated 500 of these patches (using 10
images), we run the `partial_fit` method of the online KMeans object,
MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
`partial_fit`. This happens when the number of patches they represent
has become too low, and it is better to choose a new random
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
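            # 50 random patches per image x 10 images = 500 patches per
            # partial_fit call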
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
deepesch/scikit-learn
|
setup.py
|
143
|
7364
|
#! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        #
        # They must succeed without NumPy, for example when pip is used
        # to install scikit-learn before NumPy is present on the
        # system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
bsd-3-clause
|
seanpquig/study-group
|
neural-networks-and-deep-learning/src/old/gradient_descent_hack.py
|
4
|
3524
|
"""
gradient_descent_hack
~~~~~~~~~~~~~~~~~~~~~
This program uses gradient descent to learn weights and biases for a
three-neuron network to compute the XOR function. The program is a
quick-and-dirty hack meant to illustrate the basic ideas of gradient
descent, not a cleanly-designed and generalizable implementation."""
#### Libraries
# Third-party libraries
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
return 1.0/(1.0+np.exp(-z))
def neuron(w, x):
""" Return the output from the sigmoid neuron with weights ``w``
and inputs ``x``. Both are numpy arrays, with three and two
elements, respectively. The first input weight is the bias."""
return sigmoid(w[0]+np.inner(w[1:], x))
def h(w, x):
""" Return the output from the three-neuron network with weights
``w`` and inputs ``x``. Note that ``w`` is a numpy array with
nine elements, consisting of three weights for each neuron (the
bias plus two input weights). ``x`` is a numpy array with just
two elements."""
neuron1_out = neuron(w[0:3], x) # top left neuron
neuron2_out = neuron(w[3:6], x) # bottom left neuron
return neuron(w[6:9], np.array([neuron1_out, neuron2_out]))
# inputs and corresponding outputs for the function we're computing (XOR)
INPUTS = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
OUTPUTS = [0.0, 1.0, 1.0, 0.0]
def cost(w):
""" Return the cost when the neural network has weights ``w``.
The cost is computed with respect to the XOR function."""
return 0.5 * sum((y-h(w, np.array(x)))**2 for x, y in zip(INPUTS, OUTPUTS))
def partial(f, k, w):
""" Return the partial derivative of the function ``f`` with
respect to the ``k``th variable, at location ``w``. Note that
``f`` must take a numpy array as input, and the partial derivative
is evaluated with respect to the ``k``th element in that array.
Similarly, ``w`` is a numpy array which can be used as input to
``f``."""
w_plus, w_minus = w.copy(), w.copy()
w_plus[k] += 0.01 # using epsilon = 0.01
w_minus[k] += -0.01
return (f(w_plus)-f(w_minus))/0.02
def gradient_descent(cost, eta, n):
""" Perform ``n`` iterations of the gradient descent algorithm to
minimize the ``cost`` function, with a learning rate ``eta``.
Return a tuple whose first entry is an array containing the final
weights, and whose second entry is a list of the values the
``cost`` function took at different iterations."""
w = np.random.uniform(-1, 1, 9) # initialize weights randomly
costs = []
for j in xrange(n):
c = cost(w)
print "Current cost: {0:.3f}".format(c)
costs.append(c)
gradient = [partial(cost, k, w) for k in xrange(9)]
w = np.array([wt-eta*d for wt, d in zip(w, gradient)])
return w, costs
def main():
""" Perform gradient descent to find weights for a sigmoid neural
network to compute XOR. 10,000 iterations are used. Outputs the
final value of the cost function, the final weights, and plots a
graph of cost as a function of iteration."""
w, costs = gradient_descent(cost, 0.1, 10000)
print "\nFinal cost: {0:.3f}".format(cost(w))
print "\nFinal weights: %s" % w
plt.plot(np.array(costs))
plt.xlabel('iteration')
plt.ylabel('cost')
plt.title('How cost decreases with the number of iterations')
plt.show()
if __name__ == "__main__":
main()
|
mit
|
zhisong/EGAMERS
|
py/fqc.py
|
1
|
1796
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
f = open("fqc.out")
line1 = f.readline()
ns = list(map(int, line1.split()))
neigen = ns[0]
nr = ns[1]
nregam = ns[2]
r = np.zeros((nr), dtype=float)
omglocal = np.zeros((nr), dtype=float)
omgegamlocal = np.zeros((nregam), dtype=float)
regamlocal = np.zeros((nregam), dtype=float)
gammaegamlocal = np.zeros((nregam), dtype=float)
omgglobal = np.zeros((neigen), dtype=float)
gammaglobal = np.zeros((neigen), dtype=float)
plt.figure(1)
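# Figure 1: real frequencies of the local GAM continuum, the EGAM continuum,
# and the global eigenmodes (drawn as horizontal lines)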
for i1 in range(neigen):
line = f.readline()
number_r = list(map(float, line.split()))
omgglobal[i1] = (number_r[0])
gammaglobal[i1] = number_r[1]
for i2 in range(nr):
line = f.readline()
number_r = list(map(float, line.split()))
r[i2] = (number_r[0])
omglocal[i2] = (number_r[1])
for i3 in range(nregam):
line = f.readline()
number_r = list(map(float, line.split()))
regamlocal[i3] = number_r[0]
omgegamlocal[i3] = number_r[1]
gammaegamlocal[i3] = number_r[2]
ll,=plt.plot(r, omglocal)
llegam,=plt.plot(regamlocal, omgegamlocal)
for i1 in range(neigen):
gg, =plt.plot([0, 1], [omgglobal[i1], omgglobal[i1]])
# leg = plt.legend([gg], ['Global Mode ' + str(i1+1)])
# ax = plt.gca().add_artist(leg)
plt.legend([ll,llegam, gg], ['GAM continuum','EGAM continuum', 'Global EGAM'])
plt.ylabel(r'$Re(\Omega)$')
plt.xlabel('r')
plt.title(r'$\gamma/\omega$ = ' + str(gammaglobal[0]/omgglobal[0]))
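# Figure 2: growth rates (Im(Omega)) of the EGAM continuum and of the global
# eigenmodes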
plt.figure(2)
llegam,=plt.plot(regamlocal, gammaegamlocal)
for i1 in range(neigen):
gg, =plt.plot([0, 1], [gammaglobal[i1], gammaglobal[i1]])
plt.legend([llegam, gg], ['EGAM continuum', 'Global EGAM'])
plt.ylabel(r'$\gamma=Im(\Omega)$')
plt.xlabel('r')
plt.title(r'$\gamma/\omega$ = ' + str(gammaglobal[0]/omgglobal[0]))
plt.show()
|
lgpl-3.0
|
dvro/UnbalancedDataset
|
imblearn/combine/smote_enn.py
|
2
|
6049
|
"""Class to perform over-sampling using SMOTE and cleaning using ENN."""
from __future__ import print_function
from __future__ import division
from ..over_sampling import SMOTE
from ..under_sampling import EditedNearestNeighbours
from ..base import BaseBinarySampler
class SMOTEENN(BaseBinarySampler):
"""Class to perform over-sampling using SMOTE and cleaning using ENN.
Combine over- and under-sampling using SMOTE and Edited Nearest Neighbours.
Parameters
----------
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the
        number of samples in the minority class over the number of
samples in the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
k : int, optional (default=5)
        Number of nearest neighbours to use to construct synthetic
samples.
m : int, optional (default=10)
Number of nearest neighbours to use to determine if a minority
sample is in danger.
out_step : float, optional (default=0.5)
Step size when extrapolating.
kind_smote : str, optional (default='regular')
        The type of SMOTE algorithm to use, one of the following
options: 'regular', 'borderline1', 'borderline2', 'svm'.
size_ngh : int, optional (default=3)
Size of the neighbourhood to consider to compute the average
distance to the minority point samples.
kind_sel : str, optional (default='all')
Strategy to use in order to exclude samples.
- If 'all', all neighbours will have to agree with the samples of
interest to not be excluded.
- If 'mode', the majority vote of the neighbours will be used in
order to exclude a sample.
n_jobs : int, optional (default=-1)
The number of threads to open if possible.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
The method is presented in [1]_.
    This class does not support multi-class targets.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.combine import SMOTEENN
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> sme = SMOTEENN(random_state=42)
>>> X_res, y_res = sme.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({0: 900, 1: 865})
References
----------
.. [1] G. Batista, R. C. Prati, M. C. Monard. "A study of the behavior of
several methods for balancing machine learning training data," ACM
Sigkdd Explorations Newsletter 6 (1), 20-29, 2004.
"""
def __init__(self, ratio='auto', random_state=None,
k=5, m=10, out_step=0.5, kind_smote='regular',
size_ngh=3, kind_enn='all', n_jobs=-1, **kwargs):
super(SMOTEENN, self).__init__(ratio=ratio)
self.random_state = random_state
self.k = k
self.m = m
self.out_step = out_step
self.kind_smote = kind_smote
self.size_ngh = size_ngh
self.kind_enn = kind_enn
self.n_jobs = n_jobs
self.kwargs = kwargs
self.sm = SMOTE(ratio=self.ratio, random_state=self.random_state,
k=self.k, m=self.m, out_step=self.out_step,
kind=self.kind_smote, n_jobs=self.n_jobs,
**self.kwargs)
self.enn = EditedNearestNeighbours(random_state=self.random_state,
size_ngh=self.size_ngh,
kind_sel=self.kind_enn,
n_jobs=self.n_jobs)
def fit(self, X, y):
"""Find the classes statistics before to perform sampling.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
self : object,
Return self.
"""
super(SMOTEENN, self).fit(X, y)
# Fit using SMOTE
self.sm.fit(X, y)
return self
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
"""
# Transform using SMOTE
X, y = self.sm.sample(X, y)
# Fit and transform using ENN
return self.enn.fit_sample(X, y)
|
mit
|
tim777z/seaborn
|
seaborn/rcmod.py
|
19
|
15751
|
"""Functions that alter the matplotlib rc dictionary on the fly."""
import numpy as np
import matplotlib as mpl
from . import palettes
_style_keys = (
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.linewidth",
"axes.labelcolor",
"figure.facecolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
"legend.frameon",
"legend.numpoints",
"legend.scatterpoints",
"lines.solid_capstyle",
"image.cmap",
"font.family",
"font.sans-serif",
)
_context_keys = (
"figure.figsize",
"font.size",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"grid.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.markersize",
"lines.markeredgewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.pad",
"ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
set_palette(palette, color_codes=color_codes)
if rc is not None:
mpl.rcParams.update(rc)
def reset_defaults():
"""Restore all RC params to default settings."""
mpl.rcParams.update(mpl.rcParamsDefault)
def reset_orig():
"""Restore all RC params to original settings (respects custom rc)."""
mpl.rcParams.update(mpl.rcParamsOrig)
class _AxesStyle(dict):
"""Light wrapper on a dict to set style temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_style = {k: rc[k] for k in _style_keys}
set_style(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_style(self._orig_style)
class _PlottingContext(dict):
"""Light wrapper on a dict to set context temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_context = {k: rc[k] for k in _context_keys}
set_context(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_context(self._orig_context)
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
    plotting_context : return a parameter dict to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"figure.facecolor": "white",
"text.color": dark_gray,
"axes.labelcolor": dark_gray,
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"image.cmap": "Greys",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"grid.linestyle": "-",
"lines.solid_capstyle": "round",
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"grid.color": "white",
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"axes.linewidth": 1,
"grid.color": light_gray,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"axes.linewidth": 1.25,
"grid.color": light_gray,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3,
})
else:
style_dict.update({
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0,
})
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"font.size": 12,
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
"""Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : hls | husl | matplotlib colormap | seaborn color palette
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
desat : float
Proportion to desaturate each color by.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
Examples
--------
>>> set_palette("Reds")
>>> set_palette("Set1", 8, .75)
See Also
--------
color_palette : build a color palette or set the color cycle temporarily
in a ``with`` statement.
set_context : set parameters to scale plot elements
set_style : set the default parameters for figure style
"""
colors = palettes.color_palette(palette, n_colors, desat)
mpl.rcParams["axes.color_cycle"] = list(colors)
mpl.rcParams["patch.facecolor"] = colors[0]
if color_codes:
palettes.set_color_codes(palette)
|
bsd-3-clause
|
ernestyalumni/udacity-data-science
|
IntrotoML/Lesson01NaiveBayes/studentMain.py
|
1
|
1399
|
#!/usr/bin/python
""" Complete the code in ClassifyNB.py with the sklearn
Naive Bayes classifier to classify the terrain data.
The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture, output_image
from ClassifyNB import classify
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
clf = classify(features_train, labels_train)
### draw the decision boundary with the test points overlaid
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
|
gpl-2.0
|
ndingwall/scikit-learn
|
sklearn/preprocessing/_label.py
|
8
|
29853
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import min_max_axis
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.validation import _deprecate_positional_args
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils._encode import _encode, _unique
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
class LabelEncoder(TransformerMixin, BaseEstimator):
"""Encode target labels with value between 0 and n_classes-1.
This transformer should be used to encode target values, *i.e.* `y`, and
not the input `X`.
Read more in the :ref:`User Guide <preprocessing_targets>`.
.. versionadded:: 0.12
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6])
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See Also
--------
OrdinalEncoder : Encode categorical features using an ordinal encoding
scheme.
OneHotEncoder : Encode categorical features as a one-hot numeric array.
"""
def fit(self, y):
"""Fit label encoder.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
self.classes_ = _unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : array-like of shape (n_samples,)
"""
y = column_or_1d(y, warn=True)
self.classes_, y = _unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : array-like of shape (n_samples,)
"""
check_is_fitted(self)
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
return _encode(y, uniques=self.classes_)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
y : ndarray of shape (n_samples,)
"""
check_is_fitted(self)
y = column_or_1d(y, warn=True)
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError(
"y contains previously unseen labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
def _more_tags(self):
return {'X_types': ['1dlabels']}
class LabelBinarizer(TransformerMixin, BaseEstimator):
"""Binarize labels in a one-vs-all fashion.
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int, default=0
Value with which negative labels must be encoded.
pos_label : int, default=1
Value with which positive labels must be encoded.
sparse_output : bool, default=False
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Holds the label for each class.
y_type_ : str
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : bool
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer()
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer()
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See Also
--------
label_binarize : Function to perform the transform operation of
LabelBinarizer with fixed classes.
OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
scheme.
"""
@_deprecate_positional_args
def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer.
Parameters
----------
y : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : {ndarray, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels.
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : {array, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
check_is_fitted(self)
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, classes=self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels.
Parameters
----------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float, default=None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of decision_function
(classifier).
Use 0.5 when ``Y`` contains the output of predict_proba.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : {ndarray, sparse matrix} of shape (n_samples,)
Target values. Sparse matrix will be of CSR format.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self)
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
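    # Illustrative sketch (`lb`, `clf` and `X` are assumed, not defined here):
    # for a fitted LabelBinarizer `lb` and a linear classifier `clf`,
    #     lb.inverse_transform(clf.decision_function(X), threshold=0)
    # recovers labels from margin scores, whereas predict_proba output would
    # be inverted with threshold=0.5.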
def _more_tags(self):
return {'X_types': ['1dlabels']}
@_deprecate_positional_args
def label_binarize(y, *, classes, neg_label=0, pos_label=1,
sparse_output=False):
"""Binarize labels in a one-vs-all fashion.
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape (n_classes,)
Uniquely holds the label for each class.
neg_label : int, default=0
Value with which negative labels must be encoded.
pos_label : int, default=1
Value with which positive labels must be encoded.
sparse_output : bool, default=False,
Set to true if output binary array is desired in CSR sparse format.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix will
be of CSR format.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See Also
--------
LabelBinarizer : Class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation.
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if y_type == "multilabel-indicator":
y_n_classes = y.shape[1] if hasattr(y, 'shape') else len(y[0])
if classes.size != y_n_classes:
raise ValueError("classes {0} mismatch with the labels {1}"
" found in the data"
.format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = np.in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = Y.astype(int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = min_max_axis(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=int)
else:
y = np.array(y > threshold, dtype=int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(TransformerMixin, BaseEstimator):
"""Transform between iterable of iterables and a multilabel format.
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape (n_classes,), default=None
Indicates an ordering for the class labels.
All entries should be unique (cannot contain duplicate classes).
sparse_output : bool, default=False
Set to True if output binary array is desired in CSR sparse format.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
A copy of the `classes` parameter when provided.
Otherwise it corresponds to the sorted set of classes found
when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([{'sci-fi', 'thriller'}, {'comedy'}])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
A common mistake is to pass in a list, which leads to the following issue:
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit(['sci-fi', 'thriller', 'comedy'])
MultiLabelBinarizer()
>>> mlb.classes_
array(['-', 'c', 'd', 'e', 'f', 'h', 'i', 'l', 'm', 'o', 'r', 's', 't',
'y'], dtype=object)
To correct this, the list of labels should be passed in as:
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit([['sci-fi', 'thriller', 'comedy']])
MultiLabelBinarizer()
>>> mlb.classes_
array(['comedy', 'sci-fi', 'thriller'], dtype=object)
See Also
--------
OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
scheme.
"""
@_deprecate_positional_args
def __init__(self, *, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing :term:`classes_`.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
self._cached_dict = None
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
elif len(set(self.classes)) < len(self.classes):
raise ValueError("The classes argument contains duplicate "
"classes. Remove these duplicates before passing "
"them to MultiLabelBinarizer.")
else:
classes = self.classes
dtype = int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
self._cached_dict = None
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self)
class_to_index = self._build_cache()
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _build_cache(self):
if self._cached_dict is None:
self._cached_dict = dict(zip(self.classes_,
range(len(self.classes_))))
return self._cached_dict
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix.
Returns
-------
y_indicator : sparse matrix of shape (n_samples, n_classes)
Label indicator matrix. Will be of CSR format.
"""
indices = array.array('i')
indptr = array.array('i', [0])
unknown = set()
for labels in y:
index = set()
for label in labels:
try:
index.add(class_mapping[label])
except KeyError:
unknown.add(label)
indices.extend(index)
indptr.append(len(indices))
if unknown:
warnings.warn('unknown class(es) {0} will be ignored'
.format(sorted(unknown, key=str)))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
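    # Illustrative example (assumed inputs): for y = [{'a'}, {'a', 'c'}] and
    # class_mapping = {'a': 0, 'b': 1, 'c': 2}, the arrays built above are
    # indices = [0, 0, 2], indptr = [0, 1, 3], data = [1, 1, 1] (index order
    # within a row may vary), i.e. the dense rows [1, 0, 0] and [1, 0, 1].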
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets.
Parameters
----------
yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self)
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
def _more_tags(self):
return {'X_types': ['2dlabels']}
|
bsd-3-clause
|
mrocklin/blaze
|
blaze/server/server.py
|
1
|
7939
|
from __future__ import absolute_import, division, print_function
try:
import flask
from flask import Flask, request
except ImportError:
pass
import blaze
import socket
import json
from toolz import assoc
from functools import partial, wraps
from blaze import into, compute
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from datashape.predicates import iscollection, isscalar
from ..interactive import InteractiveSymbol, coerce_scalar
from ..utils import json_dumps
from ..expr import Expr, symbol
from datashape import Mono, discover
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : ``dict`` or ``None``, optional
A dictionary mapping dataset name to any data format that blaze
understands.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
__slots__ = 'app', 'data', 'port'
def __init__(self, data=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
self.data = data
for args, kwargs, func in routes:
func2 = wraps(func)(partial(func, self.data))
app.route(*args, **kwargs)(func2)
def run(self, *args, **kwargs):
"""Run the server"""
port = kwargs.pop('port', DEFAULT_PORT)
self.port = port
try:
self.app.run(*args, port=port, **kwargs)
except socket.error:
print("\tOops, couldn't connect on port %d. Is it busy?" % port)
self.run(*args, **assoc(kwargs, 'port', port + 1))
routes = list()
def route(*args, **kwargs):
def f(func):
routes.append((args, kwargs, func))
return func
return f
@route('/datashape')
def dataset(data):
return str(discover(data))
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr: Blaze Expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
    Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
blaze.server.server.from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
return str(expr)
elif isinstance(expr, InteractiveSymbol):
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
    Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
blaze.server.server.to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
@route('/compute.json', methods=['POST', 'PUT', 'GET'])
def compserver(dataset):
if request.headers['content-type'] != 'application/json':
return ("Expected JSON data", 404)
try:
payload = json.loads(request.data.decode('utf-8'))
except ValueError:
return ("Bad JSON. Got %s " % request.data, 404)
ns = payload.get('namespace', dict())
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset})
except Exception as e:
return ("Computation failed with message:\n%s" % e, 500)
if iscollection(expr.dshape):
result = into(list, result)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
return json.dumps({'datashape': str(expr.dshape),
'data': result}, default=json_dumps)
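# Minimal client sketch (illustrative only; the running server, dataset and
# datashape are assumptions): a compute request posts a tree produced by
# ``to_tree`` in which the leaf symbol has been replaced by the string ':leaf'
# expected by the handler above.
#
#     import json, requests
#     t = symbol('accounts', 'var * {id: int32, name: string, amount: int32}')
#     payload = {'expr': to_tree(t.amount.sum(), names={t: ':leaf'})}
#     resp = requests.post('http://localhost:6363/compute.json',
#                          data=json.dumps(payload),
#                          headers={'content-type': 'application/json'})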
|
bsd-3-clause
|
DmitryOdinoky/sms-tools
|
lectures/08-Sound-transformations/plots-code/hpr-freq-transformation.py
|
21
|
2815
|
# calls the transformation functions of relevance for the harmonic plus residual (HPR) model
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hprModel as HPR
import stft as STFT
import harmonicTransformations as HT
import utilFunctions as UF
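# Analysis parameters below (meanings assumed from the sms-tools model interfaces):
# window type and size M, FFT size N, magnitude threshold t in dB, minimum sine
# duration, maximum number of harmonics nH, f0 search range (minf0, maxf0) in Hz,
# f0 error threshold f0et, harmonic deviation slope, synthesis FFT size Ns, hop size H.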
inputFile='../../../sounds/flute-A4.wav'
window='blackman'
M=801
N=2048
t=-90
minSineDur=0.1
nH=40
minf0=350
maxf0=700
f0et=8
harmDevSlope=0.1
Ns = 512
H = 128
(fs, x) = UF.wavread(inputFile)
w = get_window(window, M)
hfreq, hmag, hphase, xr = HPR.hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)
mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
freqScaling = np.array([0, 1.5, 1, 1.5])
freqStretching = np.array([0, 1.1, 1, 1.1])
timbrePreservation = 1
hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
y, yh = HPR.hprModelSynth(hfreqt, hmagt, np.array([]), xr, Ns, H, fs)
UF.wavwrite(y,fs, 'hpr-freq-transformation.wav')
plt.figure(figsize=(12, 9))
maxplotfreq = 15000.0
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.title('x (flute-A4.wav)')
plt.subplot(4,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + residual spectrogram')
plt.subplot(4,1,3)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
harms = hfreqt*np.less(hfreqt,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('transposed and stretched harmonics + residual spectrogram')
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('hpr-freq-transformations.png')
plt.show(block=False)
|
agpl-3.0
|
a-holm/MachinelearningAlgorithms
|
Classification/SupportVectorMachine/regularSupportVectorMachine2.py
|
1
|
3479
|
# -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space, which is also referred to as the
decision boundary. A separating hyperplane is considered the 'best' when the
distance between it and the data it separates (the margin) is the greatest.
Example:
$ python regularSupportVectorMachine2.py
Todo:
*
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
# importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
features = dataset.iloc[:, [2, 3]].values  # Age, EstimatedSalary
labels = dataset.iloc[:, 4].values # Purchased
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.25)
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
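# A minimal illustration (not used by the pipeline below; the toy array is an
# assumption) of how the two formulas behave on the same data: standardisation
# centres to zero mean / unit variance, normalisation maps values into [0, 1].
_toy = np.array([1.0, 2.0, 3.0, 4.0])
_toy_standardised = (_toy - _toy.mean()) / _toy.std()              # mean 0, std 1
_toy_normalised = (_toy - _toy.min()) / (_toy.max() - _toy.min())  # values in [0, 1]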
sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
# Fitting the Support Vector Machine Model to the dataset
classifier = SVC(kernel='linear')
classifier.fit(feature_train, label_train)
# Predicting the results of the Test set
y_pred = classifier.predict(feature_test)
# Creating the Confusion Matrix
cm = confusion_matrix(label_test, y_pred)
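# Quick sanity check (illustrative addition): the diagonal of the confusion
# matrix counts correct predictions, so overall test accuracy follows directly.
accuracy = float(np.trace(cm)) / cm.sum()
print('Test set accuracy: {:.3f}'.format(accuracy))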
# Visualize the Training set results
"""X_set, y_set = feature_train, label_train
X1, X2 = np.meshgrid(
np.arange(
start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01
),
np.arange(
start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01
)
)
plt.contourf(
X1, X2, classifier.predict(
np.array([X1.ravel(), X2.ravel()]).T
).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(('red', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c=ListedColormap(('red', 'blue'))(i), label=j)
plt.title('Support Vector Machine Model (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()"""
# Visualize the Test set results
X_set, y_set = feature_test, label_test
X1, X2 = np.meshgrid(
np.arange(
start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01
),
np.arange(
start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01
)
)
plt.contourf(
X1, X2, classifier.predict(
np.array([X1.ravel(), X2.ravel()]).T
).reshape(X1.shape), alpha=0.75, cmap=ListedColormap(('red', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c=ListedColormap(('red', 'blue'))(i), label=j)
plt.title('Support Vector Machine Model (Testing set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
mit
|
nishnik/networkx
|
examples/drawing/atlas.py
|
30
|
2769
|
#!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
    Attempt to check for isomorphisms and remove duplicates.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# list of graphs of all connected components
C = nx.connected_component_subgraphs(U)
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or PyDotPlus")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = nx.connected_component_subgraphs(G)
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png", dpi=75)
|
bsd-3-clause
|
mbkumar/pymatgen
|
pymatgen/analysis/chemenv/connectivity/connected_components.py
|
2
|
42585
|
"""
Connected components.
"""
import logging
import itertools
import numpy as np
from matplotlib.patches import FancyArrowPatch, Circle
import networkx as nx
from networkx.algorithms.traversal import bfs_tree
from networkx.algorithms.components import is_connected
from monty.json import MSONable
from monty.json import jsanitize
from pymatgen.analysis.chemenv.utils.graph_utils import get_delta
from pymatgen.analysis.chemenv.utils.chemenv_errors import ChemenvError
from pymatgen.analysis.chemenv.utils.math_utils import get_linearly_independent_vectors
from pymatgen.analysis.chemenv.connectivity.environment_nodes import EnvironmentNode
def draw_network(env_graph, pos, ax, sg=None, periodicity_vectors=None):
"""
Args:
env_graph ():
pos ():
ax ():
sg ():
periodicity_vectors ():
Returns:
"""
for n in env_graph:
c = Circle(pos[n], radius=0.02, alpha=0.5)
ax.add_patch(c)
env_graph.node[n]['patch'] = c
x, y = pos[n]
ax.annotate(str(n), pos[n], ha='center', va='center', xycoords='data')
seen = {}
e = None
for (u, v, d) in env_graph.edges(data=True):
n1 = env_graph.node[u]['patch']
n2 = env_graph.node[v]['patch']
rad = 0.1
if (u, v) in seen:
rad = seen.get((u, v))
rad = (rad + np.sign(rad) * 0.1) * -1
alpha = 0.5
color = 'k'
periodic_color = 'r'
delta = get_delta(u, v, d)
# center = get_center_of_arc(n1.center, n2.center, rad)
n1center = np.array(n1.center)
n2center = np.array(n2.center)
midpoint = (n1center + n2center) / 2
dist = np.sqrt(np.power(n2.center[0] - n1.center[0], 2) + np.power(n2.center[1] - n1.center[1], 2))
n1c_to_n2c = n2center - n1center
vv = np.cross(np.array([n1c_to_n2c[0], n1c_to_n2c[1], 0], np.float), np.array([0, 0, 1], np.float))
vv /= np.linalg.norm(vv)
midarc = midpoint + rad * dist * np.array([vv[0], vv[1]], np.float)
xytext_offset = 0.1 * dist * np.array([vv[0], vv[1]], np.float)
if periodicity_vectors is not None and len(periodicity_vectors) == 1:
if np.all(np.array(delta) ==
np.array(periodicity_vectors[0])) or np.all(np.array(delta) ==
-np.array(periodicity_vectors[0])):
e = FancyArrowPatch(n1center, n2center, patchA=n1, patchB=n2,
arrowstyle='-|>',
connectionstyle='arc3,rad=%s' % rad,
mutation_scale=15.0,
lw=2,
alpha=alpha,
color='r',
linestyle='dashed')
else:
e = FancyArrowPatch(n1center, n2center, patchA=n1, patchB=n2,
arrowstyle='-|>',
connectionstyle='arc3,rad=%s' % rad,
mutation_scale=10.0,
lw=2,
alpha=alpha,
color=color)
else:
ecolor = color if np.allclose(np.array(delta), np.zeros(3)) else periodic_color
e = FancyArrowPatch(n1center, n2center, patchA=n1, patchB=n2,
arrowstyle='-|>',
connectionstyle='arc3,rad=%s' % rad,
mutation_scale=10.0,
lw=2,
alpha=alpha,
color=ecolor)
ax.annotate(delta, midarc, ha='center', va='center', xycoords='data', xytext=xytext_offset,
textcoords='offset points')
seen[(u, v)] = rad
ax.add_patch(e)
return e
def make_supergraph(graph, multiplicity, periodicity_vectors):
"""
Args:
graph ():
multiplicity ():
periodicity_vectors ():
Returns:
"""
supergraph = nx.MultiGraph()
    print('periodicity vectors:')
print(periodicity_vectors)
if isinstance(multiplicity, int) or len(multiplicity) == 1:
mult = multiplicity if isinstance(multiplicity, int) else multiplicity[0]
nodes = graph.nodes(data=True)
inodes = [isite for isite, data in nodes]
indices_nodes = {isite: inodes.index(isite) for isite in inodes}
edges = graph.edges(data=True, keys=True)
connecting_edges = []
other_edges = []
for (n1, n2, key, data) in edges:
print(n1, n2, key, data)
if np.all(np.array(data['delta']) == np.array(periodicity_vectors[0])):
connecting_edges.append((n1, n2, key, data))
elif np.all(np.array(data['delta']) == -np.array(periodicity_vectors[0])):
new_data = dict(data)
new_data['delta'] = tuple(-np.array(data['delta']))
new_data['start'] = data['end']
new_data['end'] = data['start']
connecting_edges.append((n1, n2, key, new_data))
else:
if not np.all(np.array(data['delta']) == 0):
                    print('delta is neither the periodicity vector nor 0 ... : ', n1, n2, key, data['delta'], data)
input('Are we ok with this ?')
other_edges.append((n1, n2, key, data))
for imult in range(mult - 1):
for n1, n2, key, data in other_edges:
new_data = dict(data)
new_data['start'] = (imult * len(nodes)) + indices_nodes[n1]
new_data['end'] = (imult * len(nodes)) + indices_nodes[n2]
supergraph.add_edge(new_data['start'], new_data['end'],
key=key, attr_dict=new_data)
for n1, n2, key, data in connecting_edges:
new_data = dict(data)
new_data['start'] = (imult * len(nodes)) + indices_nodes[n1]
new_data['end'] = np.mod(((imult + 1) * len(nodes)) + indices_nodes[n2], len(nodes) * mult)
new_data['delta'] = (0, 0, 0)
supergraph.add_edge(new_data['start'], new_data['end'],
key=key, attr_dict=new_data)
imult = mult - 1
for n1, n2, key, data in other_edges:
new_data = dict(data)
new_data['start'] = (imult * len(nodes)) + indices_nodes[n1]
new_data['end'] = (imult * len(nodes)) + indices_nodes[n2]
supergraph.add_edge(new_data['start'], new_data['end'],
key=key, attr_dict=new_data)
for n1, n2, key, data in connecting_edges:
new_data = dict(data)
new_data['start'] = (imult * len(nodes)) + indices_nodes[n1]
new_data['end'] = indices_nodes[n2]
supergraph.add_edge(new_data['start'], new_data['end'],
key=key, attr_dict=new_data)
return supergraph
else:
raise NotImplementedError('make_supergraph not yet implemented for 2- and 3-periodic graphs')
class ConnectedComponent(MSONable):
"""
Class used to describe the connected components in a structure in terms of coordination environments.
"""
def __init__(self, environments=None, links=None, environments_data=None, links_data=None, graph=None):
"""
Constructor for the ConnectedComponent object.
Args:
environments: Environments in the connected component.
links: Links between environments in the connected component.
environments_data: Data of environment nodes.
links_data: Data of links between environment nodes.
graph: Graph of the connected component.
Returns:
ConnectedComponent: Instance of this class
"""
self._periodicity_vectors = None
self._primitive_reduced_connected_subgraph = None
self._projected = False
if graph is None:
self._connected_subgraph = nx.MultiGraph()
if environments_data is None:
self._connected_subgraph.add_nodes_from(environments)
else:
for env in environments:
if env in environments_data:
self._connected_subgraph.add_node(env, **environments_data[env])
else:
self._connected_subgraph.add_node(env)
for edge in links:
env_node1 = edge[0]
env_node2 = edge[1]
if len(edge) == 2:
key = None
else:
key = edge[2]
if ((not self._connected_subgraph.has_node(env_node1)) or
(not self._connected_subgraph.has_node(env_node2))):
                    raise ChemenvError(self.__class__, '__init__', 'Trying to add an edge with a nonexistent node ...')
if links_data is not None:
if (env_node1, env_node2, key) in links_data:
edge_data = links_data[(env_node1, env_node2, key)]
elif (env_node2, env_node1, key) in links_data:
edge_data = links_data[(env_node2, env_node1, key)]
elif (env_node1, env_node2) in links_data:
edge_data = links_data[(env_node1, env_node2)]
elif (env_node2, env_node1) in links_data:
edge_data = links_data[(env_node2, env_node1)]
else:
edge_data = None
else:
edge_data = None
if edge_data:
self._connected_subgraph.add_edge(env_node1, env_node2, key, **edge_data)
else:
self._connected_subgraph.add_edge(env_node1, env_node2, key)
else:
# TODO: should check a few requirements here ?
self._connected_subgraph = graph
def coordination_sequence(self, source_node, path_size=5, coordination='number', include_source=False):
"""Get the coordination sequence for a given node.
Args:
source_node: Node for which the coordination sequence is computed.
path_size: Maximum length of the path for the coordination sequence.
coordination: Type of coordination sequence. The default ("number") corresponds to the number
of environment nodes that are reachable by following paths of sizes between 1 and path_size.
For coordination "env:number", this resulting coordination sequence is a sequence of dictionaries
mapping the type of environment to the number of such environment reachable by following paths of
sizes between 1 and path_size.
include_source: Whether to include the source_node in the coordination sequence.
Returns:
dict: Mapping between the nth "layer" of the connected component with the corresponding coordination.
Examples:
The corner-sharing octahedral framework (as in perovskites) have the following coordination sequence (up to
a path of size 6) :
{1: 6, 2: 18, 3: 38, 4: 66, 5: 102, 6: 146}
Considering both the octahedrons and the cuboctahedrons of the typical BaTiO3 perovskite, the "env:number"
coordination sequence (up to a path of size 6) starting on the Ti octahedron and Ba cuboctahedron
are the following :
Starting on the Ti octahedron : {1: {'O:6': 6, 'C:12': 8}, 2: {'O:6': 26, 'C:12': 48},
3: {'O:6': 90, 'C:12': 128}, 4: {'O:6': 194, 'C:12': 248},
5: {'O:6': 338, 'C:12': 408}, 6: {'O:6': 522, 'C:12': 608}}
Starting on the Ba cuboctahedron : {1: {'O:6': 8, 'C:12': 18}, 2: {'O:6': 48, 'C:12': 74},
3: {'O:6': 128, 'C:12': 170}, 4: {'O:6': 248, 'C:12': 306},
5: {'O:6': 408, 'C:12': 482}, 6: {'O:6': 608, 'C:12': 698}}
If include_source is set to True, the source node is included in the sequence, e.g. for the corner-sharing
octahedral framework : {0: 1, 1: 6, 2: 18, 3: 38, 4: 66, 5: 102, 6: 146}. For the "env:number" coordination
starting on a Ba cuboctahedron (as shown above), the coordination sequence is then :
{0: {'C:12': 1}, 1: {'O:6': 8, 'C:12': 18}, 2: {'O:6': 48, 'C:12': 74}, 3: {'O:6': 128, 'C:12': 170},
4: {'O:6': 248, 'C:12': 306}, 5: {'O:6': 408, 'C:12': 482}, 6: {'O:6': 608, 'C:12': 698}}
"""
if source_node not in self._connected_subgraph:
raise ValueError('Node not in Connected Component. Cannot find coordination sequence.')
# Example of an infinite periodic net in two dimensions consisting of a stacking of
# A and B lines :
#
# * * * * *
# * * * * *
# * * A * * B * * A * * B * * A * *
# * * * * *
# * * * * *
# * * A * * B * * A * * B * * A * *
# * * * * *
# * * * * *
# * * A * * B * * A * * B * * A * *
# * * * * *
# * * * * *
# * * A * * B * * A * * B * * A * *
# * * * * *
# * * * * *
# * * A * * B * * A * * B * * A * *
# * * * * *
# * * * * *
#
# One possible quotient graph of this periodic net :
# __ __
# (0,1,0) / \ / \ (0,1,0)
# `<--A--->---B--<´
# / (0,0,0) \
# \ /
# `--->---´
# (1,0,0)
#
# The "number" coordination sequence starting from any environment is : 4-8-12-16-...
# The "env:number" coordination sequence starting from any environment is :
# {A:2, B:2}-{A:4, B:4}-{A:6, B:6}-...
current_delta = (0, 0, 0)
current_ends = [(source_node, current_delta)]
visited = {(source_node.isite, *current_delta)}
path_len = 0
cseq = {}
if include_source:
if coordination == 'number':
cseq[0] = 1
elif coordination == 'env:number':
cseq[0] = {source_node.coordination_environment: 1}
else:
raise ValueError('Coordination type "{}" is not valid for coordination_sequence.'.format(coordination))
while path_len < path_size:
new_ends = []
for current_node_end, current_delta_end in current_ends:
for nb in self._connected_subgraph.neighbors(current_node_end):
for iedge, edata in self._connected_subgraph[current_node_end][nb].items():
new_delta = current_delta_end + get_delta(current_node_end, nb, edata)
if (nb.isite, *new_delta) not in visited:
new_ends.append((nb, new_delta))
visited.add((nb.isite, *new_delta))
if nb.isite == current_node_end.isite: # Handle self loops
new_delta = current_delta_end - get_delta(current_node_end, nb, edata)
if (nb.isite, *new_delta) not in visited:
new_ends.append((nb, new_delta))
visited.add((nb.isite, *new_delta))
current_ends = new_ends
path_len += 1
if coordination == 'number':
cseq[path_len] = len(current_ends)
elif coordination == 'env:number':
myenvs = [myend.coordination_environment for myend, _ in current_ends]
cseq[path_len] = {myenv: myenvs.count(myenv) for myenv in set(myenvs)}
else:
raise ValueError('Coordination type "{}" is not valid for coordination_sequence.'.format(coordination))
return cseq
def __len__(self):
return len(self.graph)
def compute_periodicity(self, algorithm='all_simple_paths'):
"""
Args:
algorithm ():
Returns:
"""
if algorithm == 'all_simple_paths':
self.compute_periodicity_all_simple_paths_algorithm()
elif algorithm == 'cycle_basis':
self.compute_periodicity_cycle_basis()
else:
raise ValueError('Algorithm "{}" is not allowed to compute periodicity'.format(algorithm))
self._order_periodicity_vectors()
def compute_periodicity_all_simple_paths_algorithm(self):
"""
Returns:
"""
self_loop_nodes = list(nx.nodes_with_selfloops(self._connected_subgraph))
all_nodes_independent_cell_image_vectors = []
my_simple_graph = nx.Graph(self._connected_subgraph)
for test_node in self._connected_subgraph.nodes():
# TODO: do we need to go through all test nodes ?
this_node_cell_img_vectors = []
if test_node in self_loop_nodes:
for key, edge_data in self._connected_subgraph[test_node][test_node].items():
if edge_data['delta'] == (0, 0, 0):
raise ValueError('There should not be self loops with delta image = (0, 0, 0).')
this_node_cell_img_vectors.append(edge_data['delta'])
for d1, d2 in itertools.combinations(this_node_cell_img_vectors, 2):
if d1 == d2 or d1 == tuple(-ii for ii in d2):
raise ValueError('There should not be self loops with the same (or opposite) delta image.')
this_node_cell_img_vectors = get_linearly_independent_vectors(this_node_cell_img_vectors)
# Here, we adopt a cutoff equal to the size of the graph, contrary to the default of networkX (size - 1),
            # because otherwise, the all_simple_paths algorithm fails when the source node is equal to the target node.
paths = []
            # TODO: it's probably possible to do just a dfs or bfs traversal instead of taking all simple paths!
test_node_neighbors = my_simple_graph.neighbors(test_node)
breaknodeloop = False
for test_node_neighbor in test_node_neighbors:
# Special case for two nodes
if len(self._connected_subgraph[test_node][test_node_neighbor]) > 1:
this_path_deltas = []
node_node_neighbor_edges_data = list(self._connected_subgraph[test_node]
[test_node_neighbor].values())
for edge1_data, edge2_data in itertools.combinations(node_node_neighbor_edges_data, 2):
delta1 = get_delta(test_node, test_node_neighbor, edge1_data)
delta2 = get_delta(test_node_neighbor, test_node, edge2_data)
this_path_deltas.append(delta1 + delta2)
this_node_cell_img_vectors.extend(this_path_deltas)
this_node_cell_img_vectors = get_linearly_independent_vectors(this_node_cell_img_vectors)
if len(this_node_cell_img_vectors) == 3:
break
for path in nx.all_simple_paths(my_simple_graph, test_node, test_node_neighbor,
cutoff=len(self._connected_subgraph)):
path_indices = [nodepath.isite for nodepath in path]
if path_indices == [test_node.isite, test_node_neighbor.isite]:
continue
path_indices.append(test_node.isite)
path_indices = tuple(path_indices)
if path_indices not in paths:
paths.append(path_indices)
else:
continue
path.append(test_node)
                    # TODO: there are some paths that appear twice for cycles, and there are some paths that should
# probably not be considered
this_path_deltas = [np.zeros(3, np.int)]
for (node1, node2) in [(node1, path[inode1 + 1]) for inode1, node1 in enumerate(path[:-1])]:
this_path_deltas_new = []
for key, edge_data in self._connected_subgraph[node1][node2].items():
delta = get_delta(node1, node2, edge_data)
for current_delta in this_path_deltas:
this_path_deltas_new.append(current_delta + delta)
this_path_deltas = this_path_deltas_new
this_node_cell_img_vectors.extend(this_path_deltas)
this_node_cell_img_vectors = get_linearly_independent_vectors(this_node_cell_img_vectors)
if len(this_node_cell_img_vectors) == 3:
breaknodeloop = True
break
if breaknodeloop:
break
this_node_cell_img_vectors = get_linearly_independent_vectors(this_node_cell_img_vectors)
independent_cell_img_vectors = this_node_cell_img_vectors
all_nodes_independent_cell_image_vectors.append(independent_cell_img_vectors)
# If we have found that the sub structure network is 3D-connected, we can stop ...
if len(independent_cell_img_vectors) == 3:
break
self._periodicity_vectors = []
if len(all_nodes_independent_cell_image_vectors) != 0:
for independent_cell_img_vectors in all_nodes_independent_cell_image_vectors:
if len(independent_cell_img_vectors) > len(self._periodicity_vectors):
self._periodicity_vectors = independent_cell_img_vectors
if len(self._periodicity_vectors) == 3:
break
def compute_periodicity_cycle_basis(self):
"""
Returns:
"""
my_simple_graph = nx.Graph(self._connected_subgraph)
cycles = nx.cycle_basis(my_simple_graph)
all_deltas = []
for cyc in cycles:
mycyc = list(cyc)
mycyc.append(cyc[0])
this_cycle_deltas = [np.zeros(3, np.int)]
for (node1, node2) in [(node1, mycyc[inode1 + 1]) for inode1, node1 in enumerate(mycyc[:-1])]:
this_cycle_deltas_new = []
for key, edge_data in self._connected_subgraph[node1][node2].items():
delta = get_delta(node1, node2, edge_data)
for current_delta in this_cycle_deltas:
this_cycle_deltas_new.append(current_delta + delta)
this_cycle_deltas = this_cycle_deltas_new
all_deltas.extend(this_cycle_deltas)
all_deltas = get_linearly_independent_vectors(all_deltas)
if len(all_deltas) == 3:
self._periodicity_vectors = all_deltas
return
# One has to consider pairs of nodes with parallel edges (these are not considered in the simple graph cycles)
edges = my_simple_graph.edges()
for n1, n2 in edges:
if n1 == n2:
continue
if len(self._connected_subgraph[n1][n2]) == 1:
continue
elif len(self._connected_subgraph[n1][n2]) > 1:
for iedge1, iedge2 in itertools.combinations(self._connected_subgraph[n1][n2], 2):
e1data = self._connected_subgraph[n1][n2][iedge1]
e2data = self._connected_subgraph[n1][n2][iedge2]
current_delta = get_delta(n1, n2, e1data)
delta = get_delta(n2, n1, e2data)
current_delta += delta
all_deltas.append(current_delta)
else:
raise ValueError('Should not be here ...')
all_deltas = get_linearly_independent_vectors(all_deltas)
if len(all_deltas) == 3:
self._periodicity_vectors = all_deltas
return
self._periodicity_vectors = all_deltas
def make_supergraph(self, multiplicity):
"""
Args:
multiplicity ():
Returns:
"""
supergraph = make_supergraph(self._connected_subgraph, multiplicity, self._periodicity_vectors)
return supergraph
def show_graph(self, graph=None, save_file=None, drawing_type='internal', pltshow=True):
"""
Args:
graph ():
save_file ():
drawing_type ():
pltshow ():
Returns:
"""
import matplotlib.pyplot as plt
if graph is None:
shown_graph = self._connected_subgraph
else:
shown_graph = graph
plt.figure()
# pos = nx.spring_layout(shown_graph)
if drawing_type == 'internal':
pos = nx.shell_layout(shown_graph)
ax = plt.gca()
draw_network(shown_graph, pos, ax, periodicity_vectors=self._periodicity_vectors)
ax.autoscale()
plt.axis('equal')
plt.axis('off')
if save_file is not None:
plt.savefig(save_file)
# nx.draw(self._connected_subgraph)
elif drawing_type == 'draw_graphviz':
import networkx
networkx.nx_pydot.graphviz_layout(shown_graph)
elif drawing_type == 'draw_random':
import networkx
networkx.draw_random(shown_graph)
if pltshow:
plt.show()
@property
def graph(self):
"""Return the graph of this connected component.
Returns:
MultiGraph: Networkx MultiGraph object with environment as nodes and links between these nodes as edges
with information about the image cell difference if any.
"""
return self._connected_subgraph
@property
def is_periodic(self):
"""
Returns:
"""
return not self.is_0d
@property
def is_0d(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return len(self._periodicity_vectors) == 0
@property
def is_1d(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return len(self._periodicity_vectors) == 1
@property
def is_2d(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return len(self._periodicity_vectors) == 2
@property
def is_3d(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return len(self._periodicity_vectors) == 3
@staticmethod
def _order_vectors(vectors):
"""Orders vectors.
First, each vector is made such that the first non-zero dimension is positive.
Example: a periodicity vector [0, -1, 1] is transformed to [0, 1, -1].
Then vectors are ordered based on their first element, then (if the first element
is identical) based on their second element, then (if the first and second element
are identical) based on their third element and so on ...
Example: [[1, 1, 0], [0, 1, -1], [0, 1, 1]] is ordered as [[0, 1, -1], [0, 1, 1], [1, 1, 0]]
"""
for ipv, pv in enumerate(vectors):
nonzeros = np.nonzero(pv)[0]
if (len(nonzeros) > 0) and (pv[nonzeros[0]] < 0):
vectors[ipv] = -pv
return sorted(vectors, key=lambda x: x.tolist())
def _order_periodicity_vectors(self):
"""Orders the periodicity vectors.
"""
if len(self._periodicity_vectors) > 3:
raise ValueError('Number of periodicity vectors is larger than 3.')
self._periodicity_vectors = self._order_vectors(self._periodicity_vectors)
# for ipv, pv in enumerate(self._periodicity_vectors):
# nonzeros = np.nonzero(pv)[0]
# if (len(nonzeros) > 0) and (pv[nonzeros[0]] < 0):
# self._periodicity_vectors[ipv] = -pv
# self._periodicity_vectors = sorted(self._periodicity_vectors, key=lambda x: x.tolist())
@property
def periodicity_vectors(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return [np.array(pp) for pp in self._periodicity_vectors]
@property
def periodicity(self):
"""
Returns:
"""
if self._periodicity_vectors is None:
self.compute_periodicity()
return '{:d}D'.format(len(self._periodicity_vectors))
def elastic_centered_graph(self, start_node=None):
"""
Args:
start_node ():
Returns:
"""
logging.info('In elastic centering')
# Loop on start_nodes, sometimes some nodes cannot be elastically taken
# inside the cell if you start from a specific node
ntest_nodes = 0
start_node = list(self.graph.nodes())[0]
ntest_nodes += 1
centered_connected_subgraph = nx.MultiGraph()
centered_connected_subgraph.add_nodes_from(self.graph.nodes())
centered_connected_subgraph.add_edges_from(self.graph.edges(data=True))
tree = bfs_tree(G=self.graph, source=start_node)
current_nodes = [start_node]
nodes_traversed = [start_node]
inode = 0
# Loop on "levels" in the tree
tree_level = 0
while True:
tree_level += 1
logging.debug('In tree level {:d} ({:d} nodes)'.format(tree_level, len(current_nodes)))
new_current_nodes = []
# Loop on nodes in this level of the tree
for node in current_nodes:
inode += 1
logging.debug(' In node #{:d}/{:d} in level {:d} ({})'.format(inode, len(current_nodes),
tree_level, str(node)))
node_neighbors = list(tree.neighbors(n=node))
node_edges = centered_connected_subgraph.edges(nbunch=[node],
data=True, keys=True)
# Loop on neighbors of a node (from the tree used)
for inode_neighbor, node_neighbor in enumerate(node_neighbors):
logging.debug(' Testing neighbor #{:d}/{:d} ({}) of node #{:d} ({})'.format(inode_neighbor,
len(node_neighbors),
node_neighbor,
inode,
node))
already_inside = False
ddeltas = []
for n1, n2, key, edata in node_edges:
if (n1 == node and n2 == node_neighbor) or (n2 == node and n1 == node_neighbor):
if edata['delta'] == (0, 0, 0):
already_inside = True
thisdelta = edata['delta']
else:
if edata['start'] == node.isite and edata['end'] != node.isite:
thisdelta = edata['delta']
elif edata['end'] == node.isite:
thisdelta = tuple([-dd for dd in edata['delta']])
else:
raise ValueError("Should not be here ...")
ddeltas.append(thisdelta)
logging.debug(' ddeltas : {}'.format(', '.join(['({})'.format(', '.join(str(ddd)
for ddd in dd))
for dd in ddeltas])))
if ddeltas.count((0, 0, 0)) > 1:
raise ValueError('Should not have more than one 000 delta ...')
if already_inside:
logging.debug(' Edge inside the cell ... continuing to next neighbor')
continue
logging.debug(' Edge outside the cell ... getting neighbor back inside')
if (0, 0, 0) in ddeltas:
ddeltas.remove((0, 0, 0))
myddelta = np.array(ddeltas[0], np.int)
node_neighbor_edges = centered_connected_subgraph.edges(nbunch=[node_neighbor],
data=True, keys=True)
logging.debug(' Delta image from node {} to neighbor {} : '
'{}'.format(str(node),
str(node_neighbor),
'({})'.format(', '.join([str(iii) for iii in myddelta]))))
# Loop on the edges of this neighbor
for n1, n2, key, edata in node_neighbor_edges:
if ((n1 == node_neighbor and n2 != node_neighbor) or
(n2 == node_neighbor and n1 != node_neighbor)):
if edata['start'] == node_neighbor.isite and edata['end'] != node_neighbor.isite:
centered_connected_subgraph[n1][n2][key]['delta'] = tuple([ii
for ii in
np.array(edata['delta'],
np.int) + myddelta])
elif edata['end'] == node_neighbor.isite:
centered_connected_subgraph[n1][n2][key]['delta'] = tuple([ii
for ii in
np.array(edata['delta'],
np.int) - myddelta])
else:
raise ValueError('DUHH')
logging.debug(' {} to node {} now has delta '
'{}'.format(str(n1), str(n2),
str(centered_connected_subgraph[n1][n2][key]['delta'])))
new_current_nodes.extend(node_neighbors)
nodes_traversed.extend(node_neighbors)
current_nodes = new_current_nodes
if not current_nodes:
break
# Check if the graph is indeed connected if "periodic" edges (i.e. whose "delta" is not 0, 0, 0) are removed
check_centered_connected_subgraph = nx.MultiGraph()
check_centered_connected_subgraph.add_nodes_from(centered_connected_subgraph.nodes())
check_centered_connected_subgraph.add_edges_from(
[e for e in centered_connected_subgraph.edges(data=True)
if np.allclose(e[2]['delta'], np.zeros(3))])
if not is_connected(check_centered_connected_subgraph):
raise RuntimeError('Could not find a centered graph.')
return centered_connected_subgraph
@staticmethod
def _edgekey_to_edgedictkey(key):
if isinstance(key, int):
return str(key)
elif isinstance(key, str):
try:
int(key)
raise RuntimeError('Cannot pass an edge key which is a str '
'representation of an int.')
except ValueError:
return key
else:
raise ValueError('Edge key should be either a str or an int.')
@staticmethod
def _edgedictkey_to_edgekey(key):
if isinstance(key, int):
return key
elif isinstance(key, str):
try:
int(key)
except ValueError:
return key
else:
            raise ValueError('Edge key in a dict of dicts representation of a '
                             'graph should be either a str or an int.')
@staticmethod
def _retuplify_edgedata(edata):
"""
Private method used to cast back lists to tuples where applicable in an edge data.
The format of the edge data is :
{'start': STARTINDEX, 'end': ENDINDEX, 'delta': TUPLE(DELTAX, DELTAY, DELTAZ),
'ligands': [TUPLE(LIGAND_1_INDEX, TUPLE(DELTAX_START_LIG_1, DELTAY_START_LIG_1, DELTAZ_START_LIG_1),
TUPLE(DELTAX_END_LIG_1, DELTAY_END_LIG_1, DELTAZ_END_LIG_1)),
TUPLE(LIGAND_2_INDEX, ...),
... ]}
When serializing to json/bson, these tuples are transformed into lists. This method transforms these lists
back to tuples.
Args:
edata (dict): Edge data dictionary with possibly the above tuples as lists.
Returns:
            dict: Edge data dictionary with the lists transformed back into tuples when applicable.
"""
edata['delta'] = tuple(edata['delta'])
edata['ligands'] = [tuple([lig[0], tuple(lig[1]), tuple(lig[2])])
for lig in edata['ligands']]
return edata
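    # Illustrative example (values assumed): after a json/bson round trip an edge
    # data dict such as
    #     {'start': 0, 'end': 3, 'delta': [0, 1, 0], 'ligands': [[2, [0, 0, 0], [0, 1, 0]]]}
    # is turned back into
    #     {'start': 0, 'end': 3, 'delta': (0, 1, 0), 'ligands': [(2, (0, 0, 0), (0, 1, 0))]}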
def as_dict(self):
"""
Bson-serializable dict representation of the ConnectedComponent object.
Returns:
dict: Bson-serializable dict representation of the ConnectedComponent object.
"""
nodes = {'{:d}'.format(node.isite): (node, data) for node, data in self._connected_subgraph.nodes(data=True)}
node2stringindex = {node: strindex for strindex, (node, data) in nodes.items()}
dict_of_dicts = nx.to_dict_of_dicts(self._connected_subgraph)
new_dict_of_dicts = {}
for n1, n2dict in dict_of_dicts.items():
in1 = node2stringindex[n1]
new_dict_of_dicts[in1] = {}
for n2, edges_dict in n2dict.items():
in2 = node2stringindex[n2]
new_dict_of_dicts[in1][in2] = {}
for ie, edge_data in edges_dict.items():
ied = self._edgekey_to_edgedictkey(ie)
new_dict_of_dicts[in1][in2][ied] = jsanitize(edge_data)
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"nodes": {strindex: (node.as_dict(), data) for strindex, (node, data) in nodes.items()},
"graph": new_dict_of_dicts}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the ConnectedComponent object from a dict representation of the
ConnectedComponent object created using the as_dict method.
Args:
d (dict): dict representation of the ConnectedComponent object
Returns:
ConnectedComponent: The connected component representing the links of a given set of environments.
"""
nodes_map = {inode_str: EnvironmentNode.from_dict(nodedict)
for inode_str, (nodedict, nodedata) in d['nodes'].items()}
nodes_data = {inode_str: nodedata
for inode_str, (nodedict, nodedata) in d['nodes'].items()}
dod = {}
for e1, e1dict in d['graph'].items():
dod[e1] = {}
for e2, e2dict in e1dict.items():
dod[e1][e2] = {cls._edgedictkey_to_edgekey(ied): cls._retuplify_edgedata(edata)
for ied, edata in e2dict.items()}
graph = nx.from_dict_of_dicts(dod, create_using=nx.MultiGraph, multigraph_input=True)
nx.set_node_attributes(graph, nodes_data)
nx.relabel_nodes(graph, nodes_map, copy=False)
return cls(graph=graph)
@classmethod
def from_graph(cls, g):
"""
Constructor for the ConnectedComponent object from a graph of the connected component
Args:
g (MultiGraph): Graph of the connected component.
Returns:
ConnectedComponent: The connected component representing the links of a given set of environments.
"""
return cls(graph=g)
def description(self, full=False):
"""
Args:
full ():
Returns:
"""
out = ['Connected component with environment nodes :']
if not full:
out.extend([str(en) for en in sorted(self.graph.nodes())])
return '\n'.join(out)
for en in sorted(self.graph.nodes()):
out.append('{}, connected to :'.format(str(en)))
en_neighbs = nx.neighbors(self.graph, en)
for en_neighb in sorted(en_neighbs):
out.append(' - {} with delta image cells'.format(en_neighb))
all_deltas = sorted([get_delta(node1=en, node2=en_neighb,
edge_data=edge_data).tolist()
for iedge, edge_data in self.graph[en][en_neighb].items()])
out.extend([' ({:d} {:d} {:d})'.format(delta[0], delta[1], delta[2]) for delta in all_deltas])
return '\n'.join(out)
|
mit
|
FrankJet/python_dataset
|
LR/review_R.py
|
2
|
4221
|
### used to decide R by inspecting the CDF of per-business review counts
import simplejson as json
import datetime
import time
import numpy as np
import math
import matplotlib.pyplot as plt
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from dateutil.relativedelta import *
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
def string_toDatetime(string):
return datetime.datetime.strptime(string, "%Y-%m-%d")
def string_toYear(string):
return datetime.datetime.strptime(string[0:4], "%Y").date()
def string_toYearMonth(string):
return datetime.datetime.strptime(string[0:7], "%Y-%m").date()
def monthDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2.year)*12 + (timeDate1.month-timeDate2.month)
def yearDiff(timeDate1, timeDate2):
return (timeDate1.year-timeDate2)
def betweenTime(timeDate, downTime, upTime):
if ((monthDiff(timeDate, downTime) < 0)or(monthDiff(upTime, timeDate) < 0)):
return False
else:
return True
####load reviewData as format:{business:{reviewTime:[user]}}
####store reviewSum for a business as format: {business:businessSum}
###store timeReviewUser {time:[user]}
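### an illustrative (assumed) review.json line, based only on the fields read below:
### {"business": "b_001", "user": "u_042", "date": "2011-05-03"}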
def loadReview():
reviewData = {}
reviewSum = {}
timeReviewUser = {}
reviewFile = "../../dataset/review.json"
with open(reviewFile) as f:
for line in f:
reviewJson = json.loads(line)
business = reviewJson["business"]
user = reviewJson["user"]
reviewTime = string_toYearMonth(reviewJson["date"])
reviewData.setdefault(business, {})
reviewData[business].setdefault(reviewTime, [])
reviewData[business][reviewTime].append(user)
timeReviewUser.setdefault(reviewTime, [])
timeReviewUser[reviewTime].append(user)
reviewSum.setdefault(business, 0)
reviewSum[business] += 1
return (reviewData, reviewSum, timeReviewUser)
### filter businesses by review count over the 16-month window starting 2011-05
#### reviewList contains the businesses with more than 400 reviews in that window
def filterReviewData(reviewSum, reviewData):
downMonth = string_toYearMonth('2011-05')
monthList = list()
nextMonth = downMonth
for i in range(16):
monthList.append(nextMonth)
nextMonth = increMonth(nextMonth)
print "review process"
#reviewSet = set()
# for business in reviewSum.keys():
# bNum = reviewSum[business]
# if bNum > 1000:
# reviewSet.add(business)
#print "review > 1000 business sum %d"%len(reviewSet)
businessReviewSumList = []
finalBusinessSet = set()
for business in reviewData.keys():
reviewSum = 0
for t in monthList:
if(reviewData[business].has_key(t)):
reviewSum += len(reviewData[business][t])
businessReviewSumList.append(reviewSum)
if reviewSum > 400:
finalBusinessSet.add(business)
finalBusinessList = list(finalBusinessSet)
print "finalBusiness len %d"%len(finalBusinessList)
print "end process"
return (finalBusinessList, businessReviewSumList)
def increMonth(baseMonth):
return baseMonth+relativedelta(months=+1)
def mainFunction():
(reviewData, reviewSum, timeReviewUser) = loadReview()
(reviewList, businessReviewSumList) = filterReviewData(reviewSum, reviewData)
(keyValue, keyPercentile) = statisticAttribute(businessReviewSumList)
print keyValue, keyPercentile
(cdf_x, cdf_y)=get_CDF(businessReviewSumList)
plot_result(cdf_x, cdf_y, keyValue, keyPercentile)
def statisticAttribute(numList):
maxNum = max(numList)
minNum = min(numList)
meanNum = np.mean(numList)
varNum = np.var(numList)
print "max value %d min value %d mean %d var %f"%(maxNum, minNum, meanNum, varNum)
keyPercentile = 90
keyValue = np.percentile(numList, keyPercentile)
return (keyValue, float(keyPercentile)/100)
def get_CDF(numList):
print "total number of numList %d"%len(numList)
numArray = np.asarray(numList)
bins_num = np.arange(np.floor(numArray.min()), np.ceil(numArray.max()))
hist, bin_edges = np.histogram(numArray, bins=bins_num, density=True)
#print hist
cdf = np.cumsum(hist)
return (bin_edges[1:], cdf)
def plot_result(xArray, yArray, keyValue, keyPercentile):
plt.plot(xArray, yArray)
plt.plot([keyValue], [keyPercentile], 'ro')
plt.show()
if __name__ == '__main__':
    mainFunction()
|
gpl-2.0
|
rhiever/tpot
|
tpot/config/classifier.py
|
2
|
6196
|
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
# Check the TPOT documentation for information on the structure of config dicts
classifier_config_dict = {
# Classifiers
'sklearn.naive_bayes.GaussianNB': {
},
'sklearn.naive_bayes.BernoulliNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.naive_bayes.MultinomialNB': {
'alpha': [1e-3, 1e-2, 1e-1, 1., 10., 100.],
'fit_prior': [True, False]
},
'sklearn.tree.DecisionTreeClassifier': {
'criterion': ["gini", "entropy"],
'max_depth': range(1, 11),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21)
},
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ["gini", "entropy"],
'max_features': np.arange(0.05, 1.01, 0.05),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'bootstrap': [True, False]
},
'sklearn.ensemble.RandomForestClassifier': {
'n_estimators': [100],
'criterion': ["gini", "entropy"],
'max_features': np.arange(0.05, 1.01, 0.05),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'bootstrap': [True, False]
},
'sklearn.ensemble.GradientBoostingClassifier': {
'n_estimators': [100],
'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
'max_depth': range(1, 11),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21),
'subsample': np.arange(0.05, 1.01, 0.05),
'max_features': np.arange(0.05, 1.01, 0.05)
},
'sklearn.neighbors.KNeighborsClassifier': {
'n_neighbors': range(1, 101),
'weights': ["uniform", "distance"],
'p': [1, 2]
},
'sklearn.svm.LinearSVC': {
'penalty': ["l1", "l2"],
'loss': ["hinge", "squared_hinge"],
'dual': [True, False],
'tol': [1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.]
},
'sklearn.linear_model.LogisticRegression': {
'penalty': ["l1", "l2"],
'C': [1e-4, 1e-3, 1e-2, 1e-1, 0.5, 1., 5., 10., 15., 20., 25.],
'dual': [True, False]
},
'xgboost.XGBClassifier': {
'n_estimators': [100],
'max_depth': range(1, 11),
'learning_rate': [1e-3, 1e-2, 1e-1, 0.5, 1.],
'subsample': np.arange(0.05, 1.01, 0.05),
'min_child_weight': range(1, 21),
'nthread': [1]
},
    # Preprocessors
'sklearn.preprocessing.Binarizer': {
'threshold': np.arange(0.0, 1.01, 0.05)
},
'sklearn.decomposition.FastICA': {
'tol': np.arange(0.0, 1.01, 0.05)
},
'sklearn.cluster.FeatureAgglomeration': {
'linkage': ['ward', 'complete', 'average'],
'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine']
},
'sklearn.preprocessing.MaxAbsScaler': {
},
'sklearn.preprocessing.MinMaxScaler': {
},
'sklearn.preprocessing.Normalizer': {
'norm': ['l1', 'l2', 'max']
},
'sklearn.kernel_approximation.Nystroem': {
'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
'gamma': np.arange(0.0, 1.01, 0.05),
'n_components': range(1, 11)
},
'sklearn.decomposition.PCA': {
'svd_solver': ['randomized'],
'iterated_power': range(1, 11)
},
'sklearn.preprocessing.PolynomialFeatures': {
'degree': [2],
'include_bias': [False],
'interaction_only': [False]
},
'sklearn.kernel_approximation.RBFSampler': {
'gamma': np.arange(0.0, 1.01, 0.05)
},
'sklearn.preprocessing.RobustScaler': {
},
'sklearn.preprocessing.StandardScaler': {
},
'tpot.builtins.ZeroCount': {
},
'tpot.builtins.OneHotEncoder': {
'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25],
'sparse': [False],
'threshold': [10]
},
# Selectors
'sklearn.feature_selection.SelectFwe': {
'alpha': np.arange(0, 0.05, 0.001),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.SelectPercentile': {
'percentile': range(1, 100),
'score_func': {
'sklearn.feature_selection.f_classif': None
}
},
'sklearn.feature_selection.VarianceThreshold': {
'threshold': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
},
'sklearn.feature_selection.RFE': {
'step': np.arange(0.05, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
},
'sklearn.feature_selection.SelectFromModel': {
'threshold': np.arange(0, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesClassifier': {
'n_estimators': [100],
'criterion': ['gini', 'entropy'],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
}
}
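# Illustrative usage sketch (not part of the original TPOT config file): a
# dictionary like the one above is normally handed to TPOTClassifier through
# its `config_dict` argument. The dataset, split and search settings below are
# assumptions made purely for demonstration.
if __name__ == "__main__":
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    from tpot import TPOTClassifier
    X, y = load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    # Restrict the evolutionary search to the operators defined above.
    tpot = TPOTClassifier(config_dict=classifier_config_dict, generations=2,
                          population_size=10, random_state=42, verbosity=2)
    tpot.fit(X_train, y_train)
    print(tpot.score(X_test, y_test))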
|
lgpl-3.0
|
arasuarun/shogun
|
examples/undocumented/python_modular/graphical/interactive_clustering_demo.py
|
16
|
11310
|
"""
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib import mpl
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'o', color='0.7')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'o', color='0.5')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
if event.button==1:
label = 1.0
else:
label = -1.0
self.data.add_example(event.xdata, event.ydata, label)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
self.k.setEnabled(True)
def train_svm(self):
k = int(self.k.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ko')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'ko')
        # train k-means clustering
labels = self.data.get_labels()
print type(labels)
lab = BinaryLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
distance_name = self.distance_combo.currentText()
if distance_name == "EuclideanDistance":
distance=EuclideanDistance(train, train)
elif distance_name == "ManhattanMetric":
distance=ManhattanMetric(train, train)
elif distance_name == "JensenMetric":
distance=JensenMetric(train, train)
kmeans=KMeans(k, distance)
kmeans.train()
centers = kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
self.axes.plot(features[0,labels==+1], features[1,labels==+1],'ro')
self.axes.plot(features[0,labels==-1], features[1,labels==-1],'bo')
for i in xrange(k):
self.axes.plot(centers[0,i],centers[1,i],'kx', markersize=20, linewidth=5)
t = numpy.linspace(0, 2*numpy.pi, 100)
self.axes.plot(radi[i]*numpy.cos(t)+centers[0,i],radi[i]*numpy.sin(t)+centers[1,i],'k-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
k_label = QLabel('Number of Clusters')
self.k = QLineEdit()
self.k.setText("2")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(k_label)
spins_hbox.addWidget(self.k)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Cluster!")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.distance_combo = QComboBox()
self.distance_combo.insertItem(-1, "EuclideanDistance")
self.distance_combo.insertItem(-1, "ManhattanMetric")
self.distance_combo.insertItem(-1, "JensenMetric")
        self.distance_combo.setMaximumSize(QSize(300, 50))
self.connect(self.distance_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.distance_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1_pos = []
self.x2_pos = []
self.x1_neg = []
self.x2_neg = []
def get_stats(self):
num_neg = len(self.x1_neg)
num_pos = len(self.x1_pos)
str_neg = "num negative examples: %i" % num_neg
str_pos = "num positive examples: %i" % num_pos
return (str_neg, str_pos)
def get_labels(self):
return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
def get_examples(self):
num_pos = len(self.x1_pos)
num_neg = len(self.x1_neg)
examples = numpy.zeros((2,num_pos+num_neg))
for i in xrange(num_pos):
examples[0,i] = self.x1_pos[i]
examples[1,i] = self.x2_pos[i]
for i in xrange(num_neg):
examples[0,i+num_pos] = self.x1_neg[i]
examples[1,i+num_pos] = self.x2_neg[i]
return examples
def add_example(self, x1, x2, label):
if label==1:
self.x1_pos.append(x1)
self.x2_pos.append(x2)
else:
self.x1_neg.append(x1)
self.x2_neg.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
|
gpl-3.0
|
PyRsw/PyRsw
|
examples/example_1D_geoadjust2.py
|
1
|
3453
|
import numpy as np
import matplotlib.pyplot as plt
import sys
# Add the PyRsw tools to the path
# At the moment it is given explicitly.
# In the future, it could also be added to the
# pythonpath environment variable
sys.path.append('../src')
import Steppers as Step
import Fluxes as Flux
from PyRsw import Simulation
from constants import minute, hour, day
sim = Simulation() # Create a simulation object
# Geometry and Model Equations
sim.geomy = 'walls' # Geometry Types: 'periodic' or 'walls'
sim.stepper = Step.AB3 # Time-stepping algorithm: Euler, AB2, AB3, RK4
sim.method = 'Spectral' # Numerical method: 'Spectral'
sim.dynamics = 'Nonlinear' # Dynamics: 'Nonlinear' or 'Linear'
sim.flux_method = Flux.spectral_sw # Flux method: spectral_sw is only option currently
# Specify parameters
sim.Ly = 4000e3 # Domain extent (m)
sim.Nx = 1 # Grid points in x
sim.Ny = 512 # Grid points in y
sim.Nz = 1 # Number of layers
sim.g = 9.81 # Gravity (m/sec^2)
sim.f0 = 1.e-4 # Coriolis (1/sec)
sim.beta = 0e-10 # Coriolis beta parameter (1/m/sec)
sim.cfl = 0.15 # CFL coefficient (dimensionless)
sim.Hs = [100.] # Vector of mean layer depths (m)
sim.rho = [1025.] # Vector of layer densities (kg/m^3)
sim.end_time = 6.*24.*hour # End Time (sec)
# Parallel? Only applies to the FFTWs
sim.num_threads = 4
# Plotting parameters
sim.plott = 20.*minute # Period of plots
sim.animate = 'Anim' # 'Save' to create video frames,
# 'Anim' to animate,
# 'None' otherwise
sim.plot_vars = ['vort','div','h'] # Specify which variables to plot
#sim.plot_vars = ['u','v','h'] # Specify which variables to plot
# Specify manual ylimits if desired
# An empty list uses default limits
sim.ylims=[[-0.01,0.01],[-2.0,2.0],[-1.0,1.0]]
#sim.ylims=[[-0.3,0.3],[-0.2,0.2],[-1.0,1.0]]
# Output parameters
sim.output = False # True or False
sim.savet = 1.*hour # Time between saves
# Diagnostics parameters
sim.diagt = 2.*minute # Time for output
sim.diagnose = False # True or False
# Initialize the grid and zero solutions
sim.initialize()
for ii in range(sim.Nz): # Set mean depths
sim.soln.h[:,:,ii] = sim.Hs[ii]
# Hyperbolic Tangent initial conditions
W = 50.e3 # Width
amp = 0.5 # Amplitude
sim.soln.h[:,:,0] +=-amp*np.tanh(sim.Y/W)
sim.run() # Run the simulation
# Hovmuller plot
plt.figure()
t = np.arange(0,sim.end_time+sim.plott,sim.plott)/86400.
if sim.Ny==1:
x = sim.x/1e3
elif sim.Nx == 1:
x = sim.y/1e3
for L in range(sim.Nz):
field = sim.hov_h[:,0,:].T - np.sum(sim.Hs[L:])
cv = np.max(np.abs(field.ravel()))
plt.subplot(sim.Nz,1,L+1)
plt.pcolormesh(x,t, field,
cmap=sim.cmap, vmin = -cv, vmax = cv)
plt.axis('tight')
plt.title(r"$\mathrm{Hovm{\"o}ller} \; \mathrm{Plot} \; \mathrm{of} \; \eta$", fontsize = 16)
if sim.Nx > 1:
plt.xlabel(r"$\mathrm{x} \; \mathrm{(km)}$", fontsize=14)
else:
plt.xlabel(r"$\mathrm{y} \; \mathrm{(km)}$", fontsize=14)
plt.ylabel(r"$\mathrm{Time} \; \mathrm{(days)}$", fontsize=14)
plt.colorbar()
plt.show()
|
mit
|
MartinDelzant/scikit-learn
|
examples/ensemble/plot_random_forest_embedding.py
|
286
|
3531
|
"""
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with that of an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result after dimensionality reduction using truncated SVD
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/core/computation/engines.py
|
1
|
3651
|
"""
Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
UndefinedVariableError, _mathops, _reductions)
import pandas.io.formats.printing as printing
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
    expr : Expr
        The expression whose variable names are checked against the
        numexpr builtins.
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "{expr}" '
'overlap with builtins: ({s})'
.format(expr=expr, s=s))
class AbstractEngine(metaclass=abc.ABCMeta):
"""Object serving as a base class for all engines."""
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super().__init__(expr)
def convert(self):
return str(super().convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = str(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super().__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
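# Illustrative sketch (not part of the original pandas module): the mapping
# above is what pandas.eval / DataFrame.eval consult when an engine is
# requested by name ('numexpr' needs the optional numexpr package, while
# 'python' falls back to plain Python evaluation). The DataFrame below is a
# made-up example.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({"a": np.arange(5), "b": np.arange(5) * 2})
    print(pd.eval("df.a + df.b", engine="python"))
    try:
        print(pd.eval("df.a + df.b", engine="numexpr"))
    except ImportError:
        # numexpr is an optional dependency
        pass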
|
bsd-3-clause
|
cl4rke/scikit-learn
|
sklearn/utils/testing.py
|
47
|
23587
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
    exceptions : exception or tuple of exception
        The exception class (or classes) expected to be raised.
    message : str
        A substring that must appear in the raised error message.
    function : callable
        Callable object expected to raise the error.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
    This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
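# Illustrative self-check (not part of the original module): a minimal sketch
# of how a few of the helpers above can be used. The warning message and the
# type_filter value are arbitrary choices for demonstration.
if __name__ == "__main__":
    def _noisy():
        warnings.warn("deprecated thing", UserWarning)
    assert_warns_message(UserWarning, "deprecated thing", _noisy)
    assert_raise_message(ValueError, "invalid literal", int, "not-a-number")
    classifiers = all_estimators(type_filter="classifier")
    print("found %d classifiers, e.g. %s" % (len(classifiers),
                                             classifiers[0][0]))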
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
99
|
4608
|
"""
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
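# Optional sketch (not part of the original example): the hyper-parameters
# above are described as having come from a grid search. The block below shows
# roughly what such a search could look like; it is disabled by default
# because it is slow, and the parameter grid is an illustrative assumption.
PERFORM_GRID_SEARCH = False
if PERFORM_GRID_SEARCH:
    from sklearn.model_selection import GridSearchCV
    param_grid = {
        'rbm__learning_rate': [0.01, 0.06, 0.1],
        'rbm__n_components': [50, 100],
        'logistic__C': [1000.0, 6000.0],
    }
    search = GridSearchCV(classifier, param_grid, n_jobs=-1, verbose=1)
    search.fit(X_train, Y_train)
    print("Best parameters from grid search:", search.best_params_)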
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
rochefort-lab/fissa
|
docs/conf.py
|
1
|
9533
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import sys
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
# Can't import __meta__.py if the requirements aren't installed
# due to imports in __init__.py. This is a workaround.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
meta = {}
exec(read("../fissa/__meta__.py"), meta)
# -- Project information -----------------------------------------------------
now = datetime.datetime.now()
project = meta["name"].upper()
project_path = meta["path"]
author = meta["author"]
copyright = "{}, {}".format(now.year, author)
# The full version, including alpha/beta/rc tags
release = meta["version"]
# The short X.Y version
version = ".".join(release.split(".")[0:2])
# -- Automatically generate API documentation --------------------------------
def run_apidoc(_):
ignore_paths = [
os.path.join("..", project_path, "tests"),
]
argv = [
"--force", # Overwrite output files
"--follow-links", # Follow symbolic links
"--separate", # Put each module file in its own page
"--module-first", # Put module documentation before submodule
"-o",
"source/packages", # Output path
os.path.join("..", project_path),
] + ignore_paths
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def retitle_modules(_):
pth = "source/packages/modules.rst"
lines = open(pth).read().splitlines()
# Overwrite the junk in the first two lines with a better title
lines[0] = "API Reference"
lines[1] = "============="
open(pth, "w").write("\n".join(lines))
def setup(app):
app.connect("builder-inited", run_apidoc)
app.connect("builder-inited", retitle_modules)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"numpydoc", # handle NumPy documentation formatted docstrings
]
# Some extension features only available on later Python versions
if sys.version_info >= (3, 6):
# Enables search as you type with Elasticsearch on readthedocs.com
# but only available on Python 3.6 and above.
extensions.append("sphinx_search.extension")
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_rtype = True
napoleon_use_param = True
napoleon_type_aliases = {
"array_like": ":term:`array_like`",
"array-like": ":term:`array-like <array_like>`",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Use pydata on Python 3.6 and above. If it is not available, use the
# readthedocs theme. If that is unavailable, use the default builtin theme.
for name in ["pydata_sphinx_theme", "sphinx_rtd_theme"]:
try:
__import__(name)
html_theme = name
break
except ImportError:
pass
else:
print("No sphinx theme installed. Using default theme.")
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``["localtoc.html", "relations.html", "sourcelink.html",
# "searchbox.html"]``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
# "papersize": "letterpaper",
#
# The font size ("10pt", "11pt" or "12pt").
# "pointsize": "10pt",
#
# Latex figure (float) alignment
# 'figure_align': 'htbp',
#
# Additional stuff for the LaTeX preamble.
# Need to manually declare what the delta symbol (Δ) corresponds to.
"preamble": r"""
\DeclareUnicodeCharacter{394}{$\Delta$}
""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
project + ".tex",
project + " Documentation",
meta["author"],
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, project, project + " Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
project,
project + " Documentation",
author,
project,
meta["description"],
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ""
# A unique identification for the text.
#
# epub_uid = ""
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Configuration for intersphinx
# Common intersphinx mappings can be found here:
# https://gist.github.com/bskinn/0e164963428d4b51017cebdb6cda5209
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"attrs": ("https://www.attrs.org/en/stable/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"matplotlib": ("https://matplotlib.org/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"Pillow": ("https://pillow.readthedocs.io/en/stable/", None),
"skimage": ("https://scikit-image.org/docs/stable/", None),
"sklearn": ("https://scikit-learn.org/stable/", None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
gpl-3.0
|
kjyv/dynamical-system-identification
|
identification/output.py
|
2
|
33435
|
#-*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
from typing import Tuple
import sys
import os
import numpy as np
import numpy.linalg as la
import scipy.linalg as sla
import colorama
from colorama import Fore, Style
from identification import helpers
np.core.arrayprint._line_width = 160
# redefine unicode for testing in python2/3
if sys.version_info >= (3, 0):
unicode = str
# color triplets
color_triplets_6 = [
[ 0.29019608, 0.43529412, 0.89019608],
[ 0.52156863, 0.58431373, 0.88235294],
[ 0.70980392, 0.73333333, 0.89019608],
[ 0.90196078, 0.68627451, 0.7254902 ],
[ 0.87843137, 0.48235294, 0.56862745],
[ 0.82745098, 0.24705882, 0.41568627],
]
#set some more colors for higher DOF
from palettable.tableau import Tableau_10, Tableau_20
colors = Tableau_10.mpl_colors[0:6] + Tableau_20.mpl_colors + Tableau_20.mpl_colors
#swap some values for aesthetics
colors[2], colors[0] = colors[0], colors[2]
class OutputConsole(object):
def __init__(self, idf):
self.idf = idf
if not idf.opt['useEssentialParams']:
idf.stdEssentialIdx = list(range(0, idf.model.num_identified_params))
idf.stdNonEssentialIdx = []
#if requested, load params from other urdf for comparison
if idf.urdf_file_real:
self.xStdReal = idf.xStdReal
self.xBaseReal = idf.xBaseReal
p_idf = idf.model.identified_params
if idf.opt['showStandardParams']:
# convert params to COM-relative instead of frame origin-relative (linearized parameters)
if idf.opt['outputBarycentric']:
if idf.opt['identifyGravityParamsOnly']:
xStd_full = idf.model.xStdModel.copy()
xStd_full[p_idf] = idf.model.xStd
xStd = idf.paramHelpers.paramsLink2Bary(xStd_full)
self.xStd = xStd[p_idf]
else:
self.xStd = idf.paramHelpers.paramsLink2Bary(idf.model.xStd)
self.xStdModel = idf.paramHelpers.paramsLink2Bary(idf.model.xStdModel)
if idf.urdf_file_real:
self.xStdReal = idf.paramHelpers.paramsLink2Bary(self.xStdReal)
else:
self.xStd = idf.model.xStd
self.xStdModel = idf.model.xStdModel
def printStdParams(self, summary_only=False):
idf = self.idf
if not idf.opt['showStandardParams']:
return
if not summary_only:
if idf.opt['outputBarycentric']:
print("Barycentric (relative to COM) Standard Parameters")
else:
print("Linear (relative to Frame) Standard Parameters")
# collect values for parameters
description = idf.model.generator.getDescriptionOfParameters()
if idf.opt['identifyFriction']:
for i in range(0, idf.model.num_dofs):
description += "Parameter {}: Constant friction / offset of joint {}\n".format(
i+idf.model.num_model_params,
idf.model.jointNames[i]
)
for i in range(0, idf.model.num_dofs*2):
description += "Parameter {}: Velocity dep. friction joint {}\n".format(
i+idf.model.num_dofs+idf.model.num_model_params,
idf.model.jointNames[i%idf.model.num_dofs]
)
idx_ep = 0 #count essential params
lines = list()
sum_diff_r_pc_ess = 0
sum_diff_r_pc_all = 0
sum_pc_delta_all = 0
sum_pc_delta_ess = 0
descriptions = description.replace(r'Parameter ', '#').split('\n')
for idx_p in range(idf.model.num_identified_params):
idx_p_full = idf.model.identified_params[idx_p]
d = descriptions[idx_p_full]
if idf.opt['outputBarycentric']:
d = d.replace(r'first moment', 'center')
# add symbol for each parameter
d = d.replace(r':', ': {} -'.format(idf.model.param_syms[idx_p_full]))
# print beginning of each link block in green
if idx_p_full % 10 == 0 and idx_p_full < idf.model.num_model_params:
d = Fore.GREEN + d
# get some error values for each parameter
approx = self.xStd[idx_p]
apriori = self.xStdModel[idx_p_full]
diff = approx - apriori
if idf.urdf_file_real:
real = self.xStdReal[idx_p_full]
# set real params that are 0 to some small value
#if real == 0: real = 0.01
# get error percentage (identified vs. real)
# i.e. with the real value taken as 100%, how large is the identification error
diff_real = approx - real
if real != 0:
diff_r_pc = (100 * diff_real) / real
else:
diff_r_pc = (100 * diff_real) / 0.01
# add to final error percent sum
sum_diff_r_pc_all += np.abs(diff_r_pc)
if idx_p in idf.stdEssentialIdx:
sum_diff_r_pc_ess += np.abs(diff_r_pc)
# get error percentage (new to apriori)
diff_apriori = apriori - real
#if apriori == 0: apriori = 0.01
if diff_apriori != 0:
pc_delta = np.abs((100/diff_apriori)*diff_real)
elif np.abs(diff_real) > 0:
# if there was no error between apriori and real
#pc_delta = np.abs((100/0.01)*diff_real)
pc_delta = 100 + np.abs(diff_r_pc)
else:
# a priori error and identification error are both zero; keep the delta at 100% (of zero error)
pc_delta = 100
sum_pc_delta_all += pc_delta
if idx_p in idf.stdEssentialIdx:
sum_pc_delta_ess += pc_delta
else:
# get percentage difference between apriori and identified values
# (shown when real values are not known)
if apriori != 0:
diff_pc = (100 * diff) / apriori
else:
diff_pc = (100 * diff) / 0.01
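# Worked example of the measures above (editor's illustration with made-up numbers):
# for real=2.0, apriori=1.5, approx=1.8 we get diff_real=-0.2, diff_r_pc=-10%,
# diff_apriori=-0.5 and pc_delta=|100/(-0.5)*(-0.2)|=40%, i.e. the identified value
# removes 60% of the a priori error.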
#values for each line
if idf.opt['useEssentialParams'] and idx_ep < idf.num_essential_params and idx_p in idf.stdEssentialIdx:
sigma = idf.p_sigma_x[idx_ep]
else:
sigma = 0.0
if idf.urdf_file_real and idf.opt['constrainToConsistent']:
if idx_p_full in idf.model.non_id:
idf.sdp.constr_per_param[idx_p_full].append('nID')
vals = [real, apriori, approx, diff, np.abs(diff_r_pc), pc_delta, sigma, ' '.join(idf.sdp.constr_per_param[idx_p_full]), d]
elif idf.urdf_file_real:
vals = [real, apriori, approx, diff, np.abs(diff_r_pc), pc_delta, sigma, d]
elif idf.opt['constrainToConsistent']:
if idx_p_full in idf.model.non_id:
idf.sdp.constr_per_param[idx_p_full].append('nID')
vals = [apriori, approx, diff, diff_pc, ' '.join(idf.sdp.constr_per_param[idx_p_full]), d]
elif idf.opt['useEssentialParams']:
vals = [apriori, approx, diff, diff_pc, sigma, d]
else:
vals = [apriori, approx, diff, diff_pc, d]
lines.append(vals)
if idf.opt['useEssentialParams'] and idx_p in idf.stdEssentialIdx:
idx_ep += 1
if idf.urdf_file_real and idf.opt['constrainToConsistent']:
column_widths = [13, 13, 13, 7, 7, 7, 6, 8, 45]
precisions = [8, 8, 8, 4, 1, 1, 3, 0, 0]
elif idf.urdf_file_real:
column_widths = [13, 13, 13, 7, 7, 7, 6, 45]
precisions = [8, 8, 8, 4, 1, 1, 3, 0]
elif idf.opt['constrainToConsistent']:
column_widths = [13, 13, 7, 7, 8, 45]
precisions = [8, 8, 4, 1, 0, 0]
elif idf.opt['useEssentialParams']:
column_widths = [13, 13, 7, 7, 6, 45]
precisions = [8, 8, 4, 1, 3, 0]
else:
column_widths = [13, 13, 7, 7, 45]
precisions = [8, 8, 4, 1, 0]
if not summary_only:
# print column header
template = ''
for w in range(0, len(column_widths)):
template += '|{{{}:{}}}'.format(w, column_widths[w])
if idf.urdf_file_real and idf.opt['constrainToConsistent']:
print(template.format("'Real'", "A priori", "Ident", "Change", "%e", "Δ%e", "%σ", "Constr", "Description"))
elif idf.urdf_file_real:
print(template.format("'Real'", "A priori", "Ident", "Change", "%e", "Δ%e", "%σ", "Description"))
elif idf.opt['constrainToConsistent']:
print(template.format("A priori", "Ident", "Change", "%e", "Constr", "Description"))
elif idf.opt['useEssentialParams']:
print(template.format("A priori", "Ident", "Change", "%e", "%σ", "Description"))
else:
print(template.format("A priori", "Ident", "Change", "%e", "Description"))
# print values/description
template = ''
for w in range(0, len(column_widths)):
if(type(lines[0][w]) in [str, unicode, list]):
# strings don't have precision
template += '|{{{}:{}}}'.format(w, column_widths[w])
else:
template += '|{{{}:{}.{}f}}'.format(w, column_widths[w], precisions[w])
idx_p = 0
for l in lines:
t = template.format(*l)
if idx_p in idf.stdNonEssentialIdx:
t = Style.DIM + t
if idx_p in idf.stdEssentialIdx:
t = Style.BRIGHT + t
print(t, end=' ')
idx_p += 1
print(Style.RESET_ALL)
print("\n")
def printBaseParams(self, summary_only=False):
idf = self.idf
if not idf.opt['showBaseParams'] or summary_only or idf.opt['estimateWith'] in ['urdf', 'std_direct']:
return
print("Base Parameters and Corresponding standard columns")
if not idf.opt['useEssentialParams']:
baseEssentialIdx = list(range(0, idf.model.num_base_params))
baseNonEssentialIdx = []
xBase_essential = idf.model.xBase
else:
baseEssentialIdx = idf.baseEssentialIdx
baseNonEssentialIdx = idf.baseNonEssentialIdx
xBase_essential = idf.xBase_essential
# collect values for parameters
idx_ep = 0
lines = list()
sum_error_all_base = 0
for idx_p in range(0, idf.model.num_base_params):
if idf.opt['useEssentialParams']: # and xBase_essential[idx_p] != 0:
new = xBase_essential[idx_p]
else:
new = idf.model.xBase[idx_p]
old = idf.model.xBaseModel[idx_p]
diff = new - old
if idf.urdf_file_real:
real = self.xBaseReal[idx_p]
error = new - real
sum_error_all_base += np.abs(error)
# collect linear dependencies for this param
#deps = np.where(np.abs(idf.linear_deps[idx_p, :])>0.1)[0]
#dep_factors = idf.linear_deps[idx_p, deps]
if idf.opt['showBaseEqns']:
param_columns = " = "
param_columns += "{}".format(idf.model.base_deps[idx_p])
#for p in range(0, len(deps)):
# param_columns += ' {:.4f}*|{}|'.format(dep_factors[p], idf.P[idf.num_base_params:][deps[p]])
else:
param_columns = ""
if idf.opt['useEssentialParams']:
if idx_p in idf.baseEssentialIdx:
sigma = idf.p_sigma_x[idx_ep]
else:
sigma = 0
else:
sigma = idf.p_sigma_x[idx_p]
if idf.urdf_file_real:
lines.append([idx_p, real, old, new, diff, error, sigma, param_columns])
else:
lines.append([idx_p, old, new, diff, sigma, param_columns])
if idf.opt['useEssentialParams'] and idx_p in idf.baseEssentialIdx:
idx_ep+=1
if idf.urdf_file_real:
column_widths = [3, 13, 13, 13, 7, 7, 6, 30] # widths of the columns
precisions = [0, 8, 8, 8, 4, 4, 3, 0] # numerical precision
else:
column_widths = [3, 13, 13, 7, 6, 30] # widths of the columns
precisions = [0, 8, 8, 4, 3, 0] # numerical precision
if not summary_only:
# print column header
template = ''
for w in range(0, len(column_widths)):
template += '|{{{}:{}}}'.format(w, column_widths[w])
if idf.urdf_file_real:
print(template.format("#", "Real", "A priori", "Ident", "Change", "Error", "%σ", "Description"))
else:
print(template.format("#", "A priori", "Ident", "Change", "%σ", "Description"))
# print values/description
template = ''
for w in range(0, len(column_widths)):
if(type(lines[0][w]) in [str, unicode]):
# strings don't have precision
template += '|{{{}:{}}}'.format(w, column_widths[w])
else:
template += '|{{{}:{}.{}f}}'.format(w, column_widths[w], precisions[w])
idx_p = 0
for l in lines:
t = template.format(*l)
if idx_p in baseNonEssentialIdx:
t = Style.DIM + t
elif idx_p in baseEssentialIdx:
t = Style.BRIGHT + t
if idf.opt['showEssentialSteps'] and l[-2] == np.max(idf.p_sigma_x):
t = Fore.CYAN + t
print(t, end=' ')
idx_p+=1
print(Style.RESET_ALL)
def printLatex(self):
''' print standard params also as latex table '''
idf = self.idf
if not idf.opt['outputLatex']:
return
print('As Latex:')
import inspect
print(inspect.cleandoc(r"""
\begin{table}[h]
\caption{Identified standard parameters. Non-identifiable parameters are marked
with *. These have no effect on dynamics and are determined only to satisfy
consistency constraints.}
\begin{center}
"""))
header = inspect.cleandoc(r"""
\begin{minipage}[t]{0.32\linewidth}
\resizebox{0.97\textwidth}{!}{%
\begin{tabular}[t]{c c c}
\hline
\rule{0pt}{12pt} Parameter & Prior & Identified \\[2pt]
\hline\rule{0pt}{12pt}
""")
footer = inspect.cleandoc(r"""
\hline
\end{tabular}}
\end{minipage}
""")
print(header)
#print table rows
for idx_p in range(10, idf.model.num_identified_params):
#if idx_p == len(idf.model.identifiable) // 2:
if idx_p-10 in [(idf.model.num_identified_params-10) // 3, ((idf.model.num_identified_params-10) // 3)*2]:
#start a new table after each third of the params
print(footer)
print(header)
#if idx_p in idf.model.identifiable:
#add another underscore for proper subscripts
import re
param = str(idf.model.param_syms[idx_p])
p = re.compile(r"([0-9]+)(.*)")
param = p.sub(r'{\1\2}', param)
nonid = '*' if idx_p in idf.model.non_id else ''
real = self.xStdReal if idf.urdf_file_real else self.xStdModel
print(" ${}$ & ${:.4f}$ & ${:.4f}${} \\\\".format(param, real[idx_p], self.xStd[idx_p], nonid))
print(footer)
print(inspect.cleandoc(r"""
\end{center}
\end{table}
"""))
print("")
def printStats(self, summary_only=False):
idf = self.idf
if idf.opt['selectBlocksFromMeasurements']:
if len(idf.data.usedBlocks):
print("used {} of {} blocks: {}".format(len(idf.data.usedBlocks),
len(idf.data.usedBlocks)+len(idf.data.unusedBlocks),
[b for (b,bs,cond,linkConds) in idf.data.usedBlocks]))
else:
print("\ncurrent block: {}".format(idf.data.block_pos))
#print "unused blocks: {}".format(idf.unusedBlocks)
print("condition number: {}".format(la.cond(idf.model.YBase)))
if idf.opt['identifyGravityParamsOnly']:
fric = idf.model.num_dofs * idf.opt['identifyFriction']
sum_id = np.sum(idf.model.xStd[0:idf.model.num_identified_params-fric:4])
else:
sum_id = np.sum(idf.model.xStd[0:idf.model.num_model_params:10])
print(Style.BRIGHT + "Parameters" + Style.RESET_ALL)
sum_apriori = np.sum(idf.model.xStdModel[0:idf.model.num_model_params:10])
print("Estimated overall mass: {} kg vs. a priori {} kg".format(sum_id, sum_apriori), end="")
if idf.urdf_file_real:
print(" vs. real {} kg".format(np.sum(self.xStdReal[0:idf.model.num_model_params:10])))
else:
print()
if idf.opt['showStandardParams']:
if idf.opt['showTriangleConsistency']:
cons_apriori = idf.paramHelpers.checkPhysicalConsistency(idf.model.xStdModel, full=True)
cons_ident = idf.paramHelpers.checkPhysicalConsistency(idf.model.xStd)
print("Consistency (including triangle inequality):")
else:
cons_apriori = idf.paramHelpers.checkPhysicalConsistencyNoTriangle(idf.model.xStdModel, full=True)
cons_ident = idf.paramHelpers.checkPhysicalConsistencyNoTriangle(idf.model.xStd)
if False in list(cons_apriori.values()):
print(Fore.RED + "A priori parameters are not physical consistent!" + Fore.RESET)
print("Per-link physical consistency (a priori): {}".format(cons_apriori))
else:
print("A priori parameters are physical consistent")
if False in list(cons_ident.values()):
print("Identified parameters are not physical consistent,")
print("per-link physical consistency (identified): {}".format(cons_ident))
else:
print("Identified parameters are physical consistent")
if idf.opt['identifyGravityParamsOnly']:
p_idf = idf.model.identified_params
else:
p_idf = idf.model.identifiable
if idf.urdf_file_real:
if idf.opt['showStandardParams']:
#if idf.opt['useEssentialParams']:
# print("Mean relative error of essential std params: {}%".\
# format(sum_diff_r_pc_ess / len(idf.stdEssentialIdx)))
#print("Mean relative error of all std params: {}%".format(sum_diff_r_pc_all/len(idf.model.xStd)))
#if idf.opt['useEssentialParams']:
# print("Mean error delta (a priori error vs approx error) of essential std params: {}%".\
# format(sum_pc_delta_ess/len(idf.stdEssentialIdx)))
#print("Mean error delta (a priori error vs approx error) of all std params: {}%".\
# format(sum_pc_delta_all/len(idf.model.xStd)))
sq_error_apriori = np.square(la.norm(self.xStdReal[p_idf] - idf.model.xStdModel[p_idf]))
if idf.opt['identifyGravityParamsOnly']:
xStd_full = idf.model.xStdModel.copy()
xStd_full[p_idf] = idf.model.xStd
sq_error_idf = np.square(la.norm(self.xStdReal[p_idf] - xStd_full[p_idf]))
else:
sq_error_idf = np.square(la.norm(self.xStdReal[p_idf] - idf.model.xStd[p_idf]))
print("Squared distance of identifiable std parameter vectors (identified, a priori) to real: {} vs. {}".\
format(sq_error_idf, sq_error_apriori))
#sq_error_apriori = np.square(la.norm(xStdReal - idf.model.xStdModel))
#sq_error_idf = np.square(la.norm(xStdReal - idf.model.xStd))
#print( "Squared distance of std parameter vectors (identified, a priori) to real: {} vs. {}".\
# format(sq_error_idf, sq_error_apriori))
if idf.opt['showBaseParams'] and not summary_only and idf.opt['estimateWith'] not in ['urdf', 'std_direct']:
#print("Mean error (a priori - approx) of all base params: {:.5f}".\
# format(sum_error_all_base/len(idf.model.xBase)))
sq_error_apriori = np.square(la.norm(self.xBaseReal - idf.model.xBaseModel))
sq_error_idf = np.square(la.norm(self.xBaseReal - idf.model.xBase))
print("Squared distance of base parameter vectors (identified, a priori) to real: {} vs. {}".\
format(sq_error_idf, sq_error_apriori))
else:
if idf.opt['showStandardParams'] and not summary_only:
if idf.opt['identifyGravityParamsOnly']:
xStd_full = idf.model.xStdModel.copy()
xStd_full[p_idf] = idf.model.xStd
sq_error_apriori = np.square(la.norm(xStd_full[p_idf] - idf.model.xStdModel[p_idf]))
else:
sq_error_apriori = np.square(la.norm(self.xStd[p_idf] - idf.model.xStdModel[p_idf]))
print("Squared distance of identifiable std parameter vectors to a priori: {}".\
format(sq_error_apriori))
if idf.opt['showBaseParams'] and not summary_only and idf.opt['estimateWith'] not in ['urdf', 'std_direct']:
sq_error_apriori = np.square(la.norm(idf.model.xBase - idf.model.xBaseModel))
print("Squared distance of base parameter vectors (identified vs. a priori): {}".\
format(sq_error_apriori))
print(Style.BRIGHT + "\nTorque prediction errors" + Style.RESET_ALL)
# get percentage error (i.e. how big the error is relative to the measured magnitudes)
idf.estimateRegressorTorques(estimateWith='urdf') #estimate torques with CAD params
idf.estimateRegressorTorques() #estimate torques again with identified parameters
idf.apriori_error = sla.norm(idf.tauAPriori-idf.model.tauMeasured)*100/sla.norm(idf.model.tauMeasured)
idf.res_error = sla.norm(idf.tauEstimated-idf.model.tauMeasured)*100/sla.norm(idf.model.tauMeasured)
print("Relative mean residual error: {}% vs. A priori: {}%".\
format(idf.res_error, idf.apriori_error))
idf.abs_apriori_error = np.mean(sla.norm(idf.tauAPriori-idf.model.tauMeasured, axis=1))
idf.abs_res_error = idf.base_error #np.mean(sla.norm(idf.tauEstimated-idf.model.tauMeasured, axis=1))
print("Absolute mean residual error: {} vs. A priori: {}".format(idf.abs_res_error, idf.abs_apriori_error))
torque_limits = []
for joint in idf.model.jointNames:
torque_limits.append(idf.model.limits[joint]['torque'])
idf.abs_apriori_error = helpers.getNRMSE(idf.model.tauMeasured, idf.tauAPriori, limits=torque_limits)
idf.abs_res_error = helpers.getNRMSE(idf.model.tauMeasured, idf.tauEstimated, limits=torque_limits)
print("NRMS of residual error: {}% vs. A priori: {}%".format(idf.abs_res_error, idf.abs_apriori_error))
def render(self, summary_only=False):
"""Output results on the console, tables of identified parameters and some statistics"""
colorama.init(autoreset=False)
self.printStdParams(summary_only)
self.printBaseParams(summary_only)
self.printLatex()
self.printStats(summary_only)
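# Typical usage (editor's sketch; 'idf' is an identification object created elsewhere
# in this package and is only assumed here):
# OutputConsole(idf).render(summary_only=False)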
class OutputMatplotlib(object):
def __init__(self, datasets, text=None):
self.datasets = datasets
self.text = text
def render(self, idf, filename='output.html'):
progress_inst = helpers.Progress(idf.opt)
self.progress = progress_inst.progress
if idf.opt['outputFilename']:
filename = idf.opt['outputFilename']
if idf.opt['outputAs'] == 'html':
# write matplotlib/d3 plots to html file
import matplotlib
import matplotlib.pyplot as plt, mpld3
import matplotlib.axes
from mpld3 import plugins
from jinja2 import Environment, FileSystemLoader
elif idf.opt['outputAs'] in ['pdf', 'interactive', 'tikz']:
# show plots in separate matplotlib windows
import matplotlib
if idf.opt['outputAs'] == 'pdf':
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages(filename)
import matplotlib.pyplot as plt
import matplotlib.axes
else:
print("No proper output method given. Not plotting.")
return
font_size = 10
if idf.opt['outputAs'] in ['pdf', 'tikz']:
if idf.opt['plotPerJoint']:
font_size = 30
else:
font_size = 12
matplotlib.rcParams.update({'font.size': font_size})
matplotlib.rcParams.update({'axes.labelsize': font_size -5})
matplotlib.rcParams.update({'axes.linewidth': font_size / 15.})
matplotlib.rcParams.update({'axes.titlesize': font_size -2})
matplotlib.rcParams.update({'legend.fontsize': font_size -2})
matplotlib.rcParams.update({'xtick.labelsize': font_size -5})
matplotlib.rcParams.update({'ytick.labelsize': font_size -5})
matplotlib.rcParams.update({'lines.linewidth': font_size / 15.})
matplotlib.rcParams.update({'patch.linewidth': font_size / 15.})
matplotlib.rcParams.update({'grid.linewidth': font_size / 20.})
# skip some samples so graphs don't get too large/detailed
# TODO: change skip so that some maximum number of points is plotted (determined by screen size etc.)
skip = 5
#create figures and plots
figures = list()
for ds in self.progress(range(len(self.datasets))):
group = self.datasets[ds]
fig, axes = plt.subplots(len(group['dataset']), sharex=True, sharey=True)
# scale unified scaling figures to same ranges and add some margin
if group['unified_scaling']:
ymin = 0
ymax = 0
for i in range(len(group['dataset'])):
ymin = np.min((np.min(group['dataset'][i]['data']), ymin)) * 1.05
ymax = np.max((np.max(group['dataset'][i]['data']), ymax)) * 1.05
#plot each group of data
for d_i in range(len(group['dataset'])):
d = group['dataset'][d_i]
if not issubclass(type(axes), matplotlib.axes.SubplotBase):
ax = axes[d_i]
else:
ax = axes
axes = [axes]
if idf.opt['outputAs'] != 'tikz':
ax.set_title(d['title'])
if group['unified_scaling']:
ax.set_ylim([ymin, ymax])
for data_i in range(0, len(d['data'])):
if len(d['data'][data_i].shape) > 1:
#data matrices
for i in range(0, d['data'][data_i].shape[1]):
l = group['labels'][i] if data_i == 0 else ''
if i < 6 and 'contains_base' in group and group['contains_base']:
ls = 'dashed'
else:
ls = '-'
dashes = () # type: Tuple
if idf.opt['plotErrors']:
if idf.opt['plotPrioriTorques']:
n = 3
else:
n = 2
if i == n:
ls = 'dashed'
dashes = (3, 0.5)
ax.plot(d['time'][::skip], d['data'][data_i][::skip, i], label=l,
color=colors[i], alpha=1-(data_i/2.0), linestyle=ls,
dashes=dashes)
else:
#data vector
ax.plot(d['time'][::skip], d['data'][data_i][::skip],
label=group['labels'][d_i], color=colors[0], alpha=1-(data_i/2.0))
ax.grid(which='both', linestyle="dotted", alpha=0.8)
if 'y_label' in group:
ax.set_ylabel(group['y_label'])
if idf.opt['outputAs'] != 'tikz':
ax.set_xlabel("Time (s)")
plt.setp([a.get_xticklabels() for a in axes[:-1]], visible=False)
#plt.setp([a.get_yticklabels() for a in axes], fontsize=8)
if idf.opt['plotLegend']:
handles, labels = ax.get_legend_handles_labels()
if idf.opt['outputAs'] == 'html':
#TODO: show legend properly (see mpld3 bug #274)
#leg = fig.legend(handles, labels, loc='upper right', fancybox=True, fontsize=10, title='')
leg = axes[0].legend(handles, labels, loc='upper right', fancybox=True, fontsize=10, title='', prop={'size': 8})
else:
leg = plt.figlegend(handles, labels, loc='upper right', fancybox=True,
fontsize=font_size, title='', prop={'size': font_size-3})
leg.draggable()
fig.subplots_adjust(hspace=2)
fig.set_tight_layout(True)
if idf.opt['outputAs'] == 'html':
plugins.clear(fig)
plugins.connect(fig, plugins.Reset(), plugins.BoxZoom(), plugins.Zoom(enabled=False),
plugins.MousePosition(fontsize=14, fmt=".5g"))
figures.append(mpld3.fig_to_html(fig))
elif idf.opt['outputAs'] == 'interactive':
plt.show(block=False)
elif idf.opt['outputAs'] == 'pdf':
pp.savefig(plt.gcf())
elif idf.opt['outputAs'] == 'tikz':
from matplotlib2tikz import save as tikz_save
tikz_save('{}_{}_{}.tex'.format(filename,
group['dataset'][0]['title'].replace('_','-'), ds // idf.model.num_dofs),
figureheight = '\\figureheight', figurewidth = '\\figurewidth', show_info=False)
if idf.opt['outputAs'] == 'html':
path = os.path.dirname(os.path.abspath(__file__))
template_environment = Environment(autoescape=False,
loader=FileSystemLoader(os.path.join(path, '../output')),
trim_blocks=False)
context = { 'figures': figures, 'text': self.text }
outfile = os.path.join(path, '..', 'output', filename)
import codecs
with codecs.open(outfile, 'w', 'utf-8') as f:
html = template_environment.get_template("templates/index.html").render(context)
f.write(html)
print("Saved output at file://{}".format(outfile))
elif idf.opt['outputAs'] == 'interactive':
#keep non-blocking plot windows open
plt.show()
elif idf.opt['outputAs'] == 'pdf':
pp.close()
def openURL(self):
import subprocess, time
time.sleep(1)
print("Opening output...")
#call(["open", '"http://127.0.0.1:8000"'])
filepath = "http://127.0.0.1:8080/output/output.html"
if sys.platform.startswith('darwin'):
subprocess.call(('open', filepath))
elif os.name == 'nt':
os.startfile(filepath)
elif os.name == 'posix':
subprocess.call(('xdg-open', filepath))
def runServer(self):
import http.server
import socketserver
import threading
port = 8080
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", port), Handler)
threading.Thread(target=self.openURL).start()
print("serving on port {}, press ctrl-c to stop".format(port))
httpd.serve_forever()
|
lgpl-3.0
|
matthewpklein/battsimpy
|
tests/dae_twoParamPoly.py
|
1
|
44523
|
import numpy
import numpy.linalg
import scipy.linalg
import scipy.interpolate
from matplotlib import pyplot as plt
import scipy.sparse as sps
from assimulo.solvers import IDA
from assimulo.problem import Implicit_Problem
from scipy.sparse.linalg import spsolve as sparseSolve
from scipy.sparse import csr_matrix as sparseMat
import scipy.sparse as sparse
import math
from copy import deepcopy
def compute_deriv( func, x0 ) :
y0 = func(x0)
J = numpy.zeros( (len(x0),len(x0)), dtype='d' )
x_higher = deepcopy(x0)
eps = 1e-8
for ivar in range(len(x0)) :
x_higher[ivar] = x_higher[ivar] + eps
# evaluate the function
y_higher = func(x_higher)
dy_dx = (y_higher-y0) / eps
J[:,ivar] = dy_dx
x_higher[ivar] = x0[ivar]
return J
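# Quick sanity check for compute_deriv (editor's illustration, not used by the model
# below): for f(x) = [x0**2, x0*x1] at x = [1, 2] the forward-difference Jacobian
# should be close to [[2, 0], [2, 1]].
J_demo = compute_deriv( lambda v: numpy.array([v[0]**2, v[0]*v[1]]), numpy.array([1.0, 2.0]) )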
def mid_to_edge( var_mid, x_e ) :
var_edge = numpy.array( [var_mid[0]] + [ var_mid[i]*var_mid[i+1]/( ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i])))*var_mid[i+1] + (1- ((x_e[i+1]-x_e[i])/((x_e[i+2]-x_e[i+1])+(x_e[i+1]-x_e[i]))))*var_mid[i] ) for i in range(len(var_mid)-1) ] + [var_mid[-1]] )
return var_edge
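# Editor's note (illustrative, not part of the original script): mid_to_edge forms a
# distance-weighted harmonic mean of the two neighbouring cell-centre values for each
# interior edge,
#   w_i = (x_e[i+1]-x_e[i]) / ((x_e[i+2]-x_e[i+1]) + (x_e[i+1]-x_e[i]))
#   var_edge[i+1] = var_mid[i]*var_mid[i+1] / (w_i*var_mid[i+1] + (1-w_i)*var_mid[i])
# and copies the first/last cell value to the boundary edges, e.g.
#   mid_to_edge( numpy.ones(4), numpy.linspace(0., 4., 5) )  ->  numpy.ones(5)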
def flux_mat_builder( N, x_m, vols, P ) :
A = numpy.zeros([N,N], dtype='d')
for i in range(1,N-1) :
A[i,i-1] = (1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i ]) / (x_m[i ] - x_m[i-1]) - (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[i,i+1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i ])
i=0
A[0,0] = -(1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
A[0,1] = (1./vols[i]) * (P[i+1]) / (x_m[i+1] - x_m[i])
i=N-1
A[i,i-1] = (1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
A[i,i ] = -(1./vols[i]) * (P[i]) / (x_m[i] - x_m[i-1])
return A
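# Editor's illustrative sketch (hypothetical demo values, not used by the model below):
# on a uniform 3-volume mesh with unit volumes and unit edge conductances,
# flux_mat_builder reduces to the standard 1-D second-difference stencil with
# zero-flux ends.
x_e_demo = numpy.linspace( 0., 3., 4 )
x_m_demo = 0.5*( x_e_demo[1:] + x_e_demo[:-1] )
A_demo = flux_mat_builder( 3, x_m_demo, numpy.diff(x_e_demo), numpy.ones(4) )
# A_demo == [[-1, 1, 0], [1, -2, 1], [0, 1, -1]]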
class MyProblem( Implicit_Problem ) :
def __init__(self, Na, Ns, Nc, X, Ac, bsp_dir, y0, yd0, name ) :
Implicit_Problem.__init__(self,y0=y0,yd0=yd0,name=name)
self.T = 298.15 # Cell temperature, [K]
self.Ac = Ac # Cell coated area, [m^2]
# Control volumes and node points (mid node points and edge node points)
self.Ns = Ns
self.Na = Na
self.Nc = Nc
self.N = Na + Ns + Nc
self.X = X
# use self.N here so the class does not depend on the module-level N
self.num_diff_vars = self.N + Na + Nc
self.num_algr_vars = Na + Nc + self.N + Na + Nc
self.x_e = numpy.linspace( 0.0, X, self.N+1 )
self.x_m = numpy.array( [ 0.5*(self.x_e[i+1]+self.x_e[i]) for i in range(self.N) ], dtype='d' )
self.vols = numpy.array( [ (self.x_e[i+1] - self.x_e[i]) for i in range(self.N)], dtype='d' )
# Useful sub-meshes for the phi_s functions
self.x_m_a = self.x_m[:Na]
self.x_m_c = self.x_m[-Nc:]
self.x_e_a = self.x_e[:Na+1]
self.x_e_c = self.x_e[-Nc-1:]
self.vols_a = self.vols[:Na]
self.vols_c = self.vols[-Nc:]
# Volume fraction vectors and matrices for effective parameters
self.La, self.Ls, self.Lc = self.Na*X/self.N, self.Ns*X/self.N, self.Nc*X/self.N
self.Na, self.Ns, self.Nc = Na, Ns, Nc
eps_a = 0.25
eps_s = 0.5
eps_c = 0.2
ba, bs, bc = 1.2, 0.5, 0.5
eps_a_vec = [ eps_a for i in range(Na) ] # list( eps_a + eps_a/2.*numpy.sin(numpy.linspace(0.,Na/4,Na)) ) # list(eps_a + eps_a*numpy.random.randn(Na)/5.) #
eps_s_vec = [ eps_s for i in range(Ns) ]
eps_c_vec = [ eps_c for i in range(Nc) ] # list( eps_c + eps_c/2.*numpy.sin(numpy.linspace(0.,Nc/4,Nc)) ) # list(eps_c + eps_c*numpy.random.randn(Nc)/5.) #
self.eps_m = numpy.array( eps_a_vec + eps_s_vec + eps_c_vec, dtype='d' )
self.k_m = 1./self.eps_m
self.eps_mb = numpy.array( [ ea**ba for ea in eps_a_vec ] + [ es**bs for es in eps_s_vec ] + [ ec**bc for ec in eps_c_vec ], dtype='d' )
self.eps_eff = numpy.array( [ ea**(1.+ba) for ea in eps_a_vec ] + [ es**(1.+bs) for es in eps_s_vec ] + [ ec**(1.+bc) for ec in eps_c_vec ], dtype='d' )
self.eps_a_eff = self.eps_eff[:Na]
self.eps_c_eff = self.eps_eff[-Nc:]
self.K_m = numpy.diag( self.k_m )
t_plus = 0.43
F = 96485.0
self.t_plus = t_plus
self.F = F
self.R_gas = 8.314
Rp_a = 12.0e-6
Rp_c = 6.5e-6
self.Rp_a = Rp_a
self.Rp_c = Rp_c
as_a = 3.*(1.0-numpy.array(eps_a_vec, dtype='d'))/Rp_a
as_c = 3.*(1.0-numpy.array(eps_c_vec, dtype='d'))/Rp_c
self.as_a = as_a
self.as_c = as_c
self.as_a_mean = 1./self.La*sum( [ asa*v for asa,v in zip(as_a, self.vols[:Na]) ] )
self.as_c_mean = 1./self.Lc*sum( [ asc*v for asc,v in zip(as_c, self.vols[-Nc:]) ] )
print 'asa diff', self.as_a_mean - as_a[0]
print 'asc diff', self.as_c_mean - as_c[0]
Ba = [ (1.-t_plus)*asa/ea for ea, asa in zip(eps_a_vec,as_a) ]
Bs = [ 0.0 for i in range(Ns) ]
Bc = [ (1.-t_plus)*asc/ec for ec, asc in zip(eps_c_vec,as_c) ]
self.B_ce = numpy.diag( numpy.array(Ba+Bs+Bc, dtype='d') )
Bap = [ asa*F for asa in as_a ]
Bsp = [ 0.0 for i in range(Ns) ]
Bcp = [ asc*F for asc in as_c ]
self.B2_pe = numpy.diag( numpy.array(Bap+Bsp+Bcp, dtype='d') )
# Interpolators for De, ke
self.De_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/De.csv' )
self.ke_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/kappa.csv' )
self.fca_intp = build_interp_2d( bsp_dir+'data/Model_v1/Model_Pars/electrolyte/fca.csv' )
self.ce_nom = 1000.0
######
## Solid phase parameters and j vector matrices
self.sig_a = 100. # [S/m]
self.sig_c = 40. # [S/m]
self.sig_a_eff = self.sig_a * (1.0-self.eps_a_eff)
self.sig_c_eff = self.sig_c * (1.0-self.eps_c_eff)
self.A_ps_a = flux_mat_builder( self.Na, self.x_m_a, numpy.ones_like(self.vols_a), self.sig_a_eff )
self.A_ps_c = flux_mat_builder( self.Nc, self.x_m_c, numpy.ones_like(self.vols_c), self.sig_c_eff )
# Grounding form for BCs (was only needed during testing, before BVK was incorporated for coupling)
Baps = numpy.array( [ asa*F*dxa for asa,dxa in zip(as_a, self.vols_a) ], dtype='d' )
Bcps = numpy.array( [ asc*F*dxc for asc,dxc in zip(as_c, self.vols_c) ], dtype='d' )
self.B_ps_a = numpy.diag( Baps )
self.B_ps_c = numpy.diag( Bcps )
self.B2_ps_a = numpy.zeros( self.Na, dtype='d' )
self.B2_ps_a[ 0] = -1.
self.B2_ps_c = numpy.zeros( self.Nc, dtype='d' )
self.B2_ps_c[-1] = -1.
# Two parameter Solid phase diffusion model
Dsa = 1e-12
Dsc = 1e-14
self.Dsa = Dsa
self.Dsc = Dsc
self.csa_max = 30555.0 # [mol/m^3]
self.csc_max = 51554.0 # [mol/m^3]
self.B_cs_a = numpy.diag( numpy.array( [-3.0/Rp_a for i in range(Na)], dtype='d' ) )
self.B_cs_c = numpy.diag( numpy.array( [-3.0/Rp_c for i in range(Nc)], dtype='d' ) )
self.C_cs_a = numpy.eye(Na)
self.C_cs_c = numpy.eye(Nc)
self.D_cs_a = numpy.diag( numpy.array( [-Rp_a/Dsa/5.0 for i in range(Na)], dtype='d' ) )
self.D_cs_c = numpy.diag( numpy.array( [-Rp_c/Dsc/5.0 for i in range(Nc)], dtype='d' ) )
# bsp_dir = '/home/m_klein/Projects/battsimpy/'
# bsp_dir = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
uref_a_map = numpy.loadtxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_x.csv' , delimiter=',' )
uref_c_map = numpy.loadtxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_x.csv', delimiter=',' )
duref_a = numpy.gradient( uref_a_map[:,1] ) / numpy.gradient( uref_a_map[:,0] )
duref_c = numpy.gradient( uref_c_map[:,1] ) / numpy.gradient( uref_c_map[:,0] )
if uref_a_map[1,0] > uref_a_map[0,0] :
self.uref_a_interp = scipy.interpolate.interp1d( uref_a_map[:,0], uref_a_map[:,1] )
self.duref_a_interp = scipy.interpolate.interp1d( uref_a_map[:,0], duref_a )
else :
self.uref_a_interp = scipy.interpolate.interp1d( numpy.flipud(uref_a_map[:,0]), numpy.flipud(uref_a_map[:,1]) )
self.duref_a_interp = scipy.interpolate.interp1d( numpy.flipud(uref_a_map[:,0]), numpy.flipud(duref_a) )
if uref_c_map[1,0] > uref_c_map[0,0] :
self.uref_c_interp = scipy.interpolate.interp1d( uref_c_map[:,0], uref_c_map[:,1] )
self.duref_c_interp = scipy.interpolate.interp1d( uref_c_map[:,0], duref_c )
else :
self.uref_c_interp = scipy.interpolate.interp1d( numpy.flipud(uref_c_map[:,0]), numpy.flipud(uref_c_map[:,1]) )
self.duref_c_interp = scipy.interpolate.interp1d( numpy.flipud(uref_c_map[:,0]), numpy.flipud(duref_c) )
# Plot the Uref data for verification
# xa = numpy.linspace( 0.05, 0.95, 50 )
# xc = numpy.linspace( 0.40, 0.95, 50 )
# plt.figure()
# plt.plot( uref_a_map[:,0], uref_a_map[:,1], label='Ua map' )
# plt.plot( uref_c_map[:,0], uref_c_map[:,1], label='Uc map' )
# plt.plot( xa, self.uref_a_interp(xa), label='Ua interp' )
# plt.plot( xc, self.uref_c_interp(xc), label='Uc interp' )
# plt.legend()
# plt.figure()
# plt.plot( uref_a_map[:,0], duref_a, label='dUa map' )
# plt.plot( uref_c_map[:,0], duref_c, label='dUc map' )
# plt.plot( xa, self.duref_a_interp(xa), label='dUa interp' )
# plt.plot( xc, self.duref_c_interp(xc), label='dUc interp' )
# plt.legend()
# plt.show()
# Reaction kinetics parameters
self.io_a = 5.0 # [A/m^2]
self.io_c = 5.0 # [A/m^2]
# System indices
self.ce_inds = range(self.N)
self.csa_inds = range(self.N, self.N+self.Na)
self.csc_inds = range(self.N+self.Na, self.N+self.Na+self.Nc)
c_end = self.N+self.Na+self.Nc
self.ja_inds = range(c_end, c_end+self.Na)
self.jc_inds = range(c_end+self.Na, c_end+self.Na +self.Nc)
self.pe_inds = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.N )
self.pe_a_inds = range( c_end+self.Na+self.Nc, c_end+self.Na+self.Nc +self.Na )
self.pe_c_inds = range( c_end+self.Na+self.Nc +self.Na+self.Ns, c_end+self.Na+self.Nc +self.N )
self.pa_inds = range( c_end+self.Na+self.Nc+self.N, c_end+self.Na+self.Nc+self.N +self.Na )
self.pc_inds = range( c_end+self.Na+self.Nc+self.N+self.Na, c_end+self.Na+self.Nc+self.N+self.Na +self.Nc )
def set_iapp( self, I_app ) :
self.i_app = I_app / self.Ac
## Define c_e functions
def build_Ace_mat( self, c ) :
D_eff = self.Diff_ce( c )
A = self.K_m.dot( flux_mat_builder( self.N, self.x_m, self.vols, D_eff ) )
return A
def Diff_ce( self, c ) :
T = self.T
# D_ce = 1e-4 * 10.0**( -4.43 - (54./(T-229.-5e-3*c)) - (0.22e-3*c) ) ## Torchio (LIONSIMBA) ECS paper
D_ce = self.De_intp( c, T, grid=False ).flatten()
D_mid = D_ce * self.eps_eff
if type(c) == float :
D_edge = D_mid
else :
D_edge = mid_to_edge( D_mid, self.x_e )
return D_edge
## Define phi_e functions
def build_Ape_mat( self, c ) :
k_eff = self.kapp_ce( c )
A = flux_mat_builder( self.N, self.x_m, self.vols, k_eff )
A[-1,-1] = 2*A[-1,-1]
return A
def build_Bpe_mat( self, c ) :
gam = 2.*(1.-self.t_plus)*self.R_gas*self.T / self.F
k_eff = self.kapp_ce( c )
c_edge = mid_to_edge( c, self.x_e )
B1 = flux_mat_builder( self.N, self.x_m, self.vols, k_eff*gam/c_edge )
return B1
def kapp_ce( self, c ) :
T = self.T
# k_ce = 1e-4 * c *( -10.5 +0.668e-3*c + 0.494e-6*c**2
# + (0.074 - 1.78*1e-5*c - 8.86e-10*c**2)*T
# + (-6.96e-5 + 2.8e-8*c)*T**2 )**2 ## Torchio (LIONSIMBA) ECS paper
k_ce = 1e-1*self.ke_intp( c, T, grid=False ).flatten() # 1e-1 converts from mS/cm to S/m (model uses SI units)
k_mid = k_ce * self.eps_eff
if type(c) == float :
k_edge = k_mid
else :
k_edge = mid_to_edge( k_mid, self.x_e )
return k_edge
def build_Bjac_mat( self, eta, a, b ) :
d = a*numpy.cosh( b*eta )*b
return numpy.diag( d )
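# (editor's note) this is the analytic derivative of the Butler-Volmer flux
# j = a*sinh(b*eta) with respect to eta, i.e. d j/d eta = a*b*cosh(b*eta),
# used in jac() below to assemble the Jacobian blocks of the reaction equations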
def get_voltage( self, y ) :
"""
Return the cell potential
"""
pc = y[self.pc_inds]
pa = y[self.pa_inds]
Vcell = pc[-1] - pa[0]
return Vcell
## Define system equations
def res( self, t, y, yd ) :
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
c_dots = yd[self.ce_inds]
# Solid conc a:anode, c:cathode
csa = y[ self.csa_inds]
csc = y[ self.csc_inds]
csa_dt = yd[self.csa_inds]
csc_dt = yd[self.csc_inds]
# Reaction (Butler-Volmer Kinetics)
ja_rxn = y[self.ja_inds]
jc_rxn = y[self.jc_inds]
# E-lyte potential
phi = y[self.pe_inds]
# Solid potential
phi_s_a = y[self.pa_inds]
phi_s_c = y[self.pc_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
## Compute extra variables
# For the reaction kinetics
csa_ss = csa + (self.D_cs_a.dot( ja_rxn ).flatten()) # anode surface conc
csc_ss = csc + (self.D_cs_c.dot( jc_rxn ).flatten()) # cathode surface conc
Uref_a = self.uref_a_interp( csa_ss/self.csa_max ) # anode equilibrium potential
Uref_c = self.uref_c_interp( csc_ss/self.csc_max ) # cathode equilibrium potential
eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
# ja = 2.0*self.io_a * numpy.sqrt( ce[:self.Na]/self.ce_nom * (1.0 - csa_ss/self.csa_max) * (csa_ss/self.csa_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_a )
# jc = 2.0*self.io_c * numpy.sqrt( ce[-self.Nc:]/self.ce_nom * (1.0 - csc_ss/self.csc_max) * (csc_ss/self.csc_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_c )
ja = (2.0*self.io_a/self.F) * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
jc = (2.0*self.io_c/self.F) * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
## Compute the residuals
# Time deriv components
r1 = c_dots - ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
r2 = csa_dt - (self.B_cs_a.dot(ja_rxn).flatten()) # Anode conc
r3 = csc_dt - (self.B_cs_c.dot(jc_rxn).flatten()) # Cathode conc
# Algebraic components
r4 = ja_rxn - ja
r5 = jc_rxn - jc
r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential #+ extra #
r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
res_out = numpy.concatenate( [r1, r2, r3, r4, r5, r6, r7, r8] )
return res_out
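# (editor's note) the first num_diff_vars entries of res_out (r1-r3) are the
# differential residuals and the remaining num_algr_vars entries (r4-r8) are purely
# algebraic; this ordering is what the algvar mask set on the IDA problem further
# down in the script relies on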
def jac( self, c, t, y, yd ) :
### Setup
## Parse out the states
# E-lyte conc
ce = y[ self.ce_inds]
# c_dots = yd[self.ce_inds]
# Solid conc a:anode, c:cathode
csa = y[ self.csa_inds]
csc = y[ self.csc_inds]
# csa_dt = yd[self.csa_inds]
# csc_dt = yd[self.csc_inds]
# Reaction (Butler-Volmer Kinetics)
ja_rxn = y[self.ja_inds]
jc_rxn = y[self.jc_inds]
# E-lyte potential
phi = y[self.pe_inds]
# Solid potential
phi_s_a = y[self.pa_inds]
phi_s_c = y[self.pc_inds]
## Grab state dependent matrices
# For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
A_ce = self.build_Ace_mat( ce )
A_pe = self.build_Ape_mat( ce )
B_pe = self.build_Bpe_mat( ce )
## Compute extra variables
# For the reaction kinetics
csa_ss = csa + (self.D_cs_a.dot( ja_rxn ).flatten()) # anode surface conc
csc_ss = csc + (self.D_cs_c.dot( jc_rxn ).flatten()) # cathode surface conc
Uref_a = self.uref_a_interp( csa_ss/self.csa_max ) # anode equilibrium potential
Uref_c = self.uref_c_interp( csc_ss/self.csc_max ) # cathode equilibrium potential
eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
###
### Build the Jac matrix
## Self coupling
A_dots = numpy.diag( [1*c for i in range(self.N+self.Na+self.Nc)] )
j_c = A_dots - scipy.linalg.block_diag( A_ce, numpy.zeros([self.Na,self.Na]), numpy.zeros([self.Nc,self.Nc]) )
Bjac_a = self.build_Bjac_mat( eta_a, (2.0*self.io_a/self.F), 0.5*self.F/(self.R_gas*self.T) )
Bjac_c = self.build_Bjac_mat( eta_c, (2.0*self.io_c/self.F), 0.5*self.F/(self.R_gas*self.T) )
DUDcsa_ss = numpy.diag( (1.0/self.csa_max)*self.duref_a_interp(csa_ss/self.csa_max) )
DUDcsc_ss = numpy.diag( (1.0/self.csc_max)*self.duref_c_interp(csc_ss/self.csc_max) )
A_ja = numpy.diag(numpy.ones(self.Na)) - Bjac_a.dot(DUDcsa_ss.dot(-1.0*self.D_cs_a))
A_jc = numpy.diag(numpy.ones(self.Nc)) - Bjac_c.dot(DUDcsc_ss.dot(-1.0*self.D_cs_c))
j = scipy.linalg.block_diag( j_c, A_ja, A_jc, A_pe, self.A_ps_a, self.A_ps_c )
## Cross coupling
# c_e: j coupling back in
j[ numpy.ix_(self.ce_inds, self.ja_inds) ] = -self.B_ce[:, :self.Na]
j[ numpy.ix_(self.ce_inds, self.jc_inds) ] = -self.B_ce[:, -self.Nc:]
# cs_a: j coupling
j[ numpy.ix_(self.csa_inds, self.ja_inds) ] = -self.B_cs_a
# cs_c: j coupling
j[ numpy.ix_(self.csc_inds, self.jc_inds) ] = -self.B_cs_c
# j_a: pe, pa, csa coupling
j[numpy.ix_(self.ja_inds, self.pa_inds )] = -Bjac_a*( 1.0)
j[numpy.ix_(self.ja_inds, self.pe_a_inds)] = -Bjac_a*(-1.0)
j[numpy.ix_(self.ja_inds, self.csa_inds )] = -Bjac_a.dot(-1.0*DUDcsa_ss*1.0)
# j[numpy.ix_(self.ja_inds, self.ja_inds ) ] = j[numpy.ix_(self.ja_inds, self.ja_inds ) ] - Bjac_a.dot(DUDcsa_ss.dot(self.D_cs_a)*(-1.0)))
# j_c: pe, pc, csc coupling
j[numpy.ix_(self.jc_inds, self.pc_inds )] = -Bjac_c*( 1.0)
j[numpy.ix_(self.jc_inds, self.pe_c_inds)] = -Bjac_c*(-1.0)
j[numpy.ix_(self.jc_inds, self.csc_inds )] = -Bjac_c.dot(-1.0*DUDcsc_ss*1.0)
# j[numpy.ix_(self.jc_inds, self.jc_inds ) ] = j[numpy.ix_(self.jc_inds, self.jc_inds ) ] - Bjac_c.dot(DUDcsc_ss.dot(self.D_cs_c)*(-1.0)))
# phi_e: ce coupling into phi_e equation
j[numpy.ix_(self.pe_inds,self.ce_inds)] = -B_pe
j[numpy.ix_(self.pe_inds,self.ja_inds)] = self.B2_pe[:,:self.Na]
j[numpy.ix_(self.pe_inds,self.jc_inds)] = self.B2_pe[:,-self.Nc:]
# phi_s_a: ja
j[numpy.ix_(self.pa_inds,self.ja_inds)] = -self.B_ps_a
# phi_s_c: jc
j[numpy.ix_(self.pc_inds,self.jc_inds)] = -self.B_ps_c
###
return j
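# Editor's note (illustrative only): the reaction residuals above use symmetric
# Butler-Volmer kinetics, j = (2*io/F)*sinh(0.5*F/(R*T)*eta), so the flux is roughly
# linear in the overpotential for small eta. With the exchange current density used
# here (io = 5 A/m^2) and T = 298.15 K, a 10 mV overpotential gives a molar flux of
# about 2e-5 mol/(m^2*s):
# j_demo = (2.0*5.0/96485.0) * math.sinh( 0.5*96485.0/(8.314*298.15)*0.01 )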
csa_max = 30555.0 # [mol/m^3]
csc_max = 51554.0 # [mol/m^3]
bsp_dir = '/home/m_klein/Projects/battsimpy/'
#bsp_dir = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
#bsp_dir = '/Users/mk/Desktop/battsim/battsimpy/'
uref_a_map = numpy.loadtxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_anode_x.csv' , delimiter=',' )
uref_c_map = numpy.loadtxt( bsp_dir + '/data/Model_v1/Model_Pars/solid/thermodynamics/uref_cathode_x.csv', delimiter=',' )
if uref_a_map[1,0] > uref_a_map[0,0] :
uref_a_interp = scipy.interpolate.interp1d( uref_a_map[:,0], uref_a_map[:,1] )
else :
uref_a_interp = scipy.interpolate.interp1d( numpy.flipud(uref_a_map[:,0]), numpy.flipud(uref_a_map[:,1]) )
if uref_c_map[1,0] > uref_c_map[0,0] :
uref_c_interp = scipy.interpolate.interp1d( uref_c_map[:,0], uref_c_map[:,1] )
else :
uref_c_interp = scipy.interpolate.interp1d( numpy.flipud(uref_c_map[:,0]), numpy.flipud(uref_c_map[:,1]) )
xa_init, xc_init = 0.8, 0.37
ca_init = xa_init*csa_max
cc_init = xc_init*csc_max
Ua_init = uref_a_interp( xa_init )
Uc_init = uref_c_interp( xc_init )
print Ua_init
print Uc_init
### Mesh
La = 65.0
Ls = 25.0
Lc = 55.0
Lt = (La+Ls+Lc)
X = Lt*1e-6 # [m]
N = 80
Ns = int(N*(Ls/Lt))
Na = int(N*(La/Lt))
Nc = N - Ns - Na
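# with these lengths (La=65, Ls=25, Lc=55 -> Lt=145) and N=80 control volumes the
# split works out to Na=35, Ns=13, Nc=32 (editor's note, added for clarity)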
Crate = 1.
Vcut = 3.0 # [V], cutoff voltage for end of discharge
ce_lims = [100.,3000.]
cell_cap = 29.0
cell_coated_area = 1.0 # [m^2]
I_app = Crate*cell_cap # A
### Initial conditions
# E-lyte conc
c_init = 1100.0 # [mol/m^3]
c_centered = c_init*numpy.ones( N, dtype='d' ) #numpy.linspace(1500, 500, N) #
# E-lyte potential
p_init = 0.0 # [V]
p_centered = p_init*numpy.ones( N, dtype='d' )
# Solid potential on anode and cathode
pa_init = Ua_init #0.0 # [V]
pa_centered = pa_init*numpy.ones( Na, dtype='d' )
pc_init = Uc_init#-Ua_init #0.0 # [V]
pc_centered = pc_init*numpy.ones( Nc, dtype='d' )
# Solid conc on anode and cathode
#ca_init = 10000.0 # [mol/m^3]
ca_centered = ca_init*numpy.ones( Na, dtype='d' )
#cc_init = 30000.0 # [mol/m^3]
cc_centered = cc_init*numpy.ones( Nc, dtype='d' )
ja = numpy.zeros(Na)
jc = numpy.zeros(Nc)
#The initial conditions
y0 = numpy.concatenate( [c_centered, ca_centered, cc_centered, ja, jc, p_centered, pa_centered, pc_centered] ) #Initial conditions
yd0 = [0.0 for i in range(N+Na+Nc +Na+Nc +N+Na+Nc)] #Initial conditions
#Create an Assimulo implicit problem
imp_mod = MyProblem(Na,Ns,Nc,X,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
#Set the options for the problem
imp_mod.algvar = [1.0 for i in range(N+Na+Nc)] + [0.0 for i in range(Na+Nc +N+Na+Nc)] #Set the algebraic components
#Create an Assimulo implicit solver (IDA)
imp_sim = IDA(imp_mod) #Create a IDA solver
#Set the solver parameters
imp_sim.atol = 1e-5 #Default 1e-6
imp_sim.rtol = 1e-5 #Default 1e-6
imp_sim.suppress_alg = True #Suppress the algebraic variables on the error test
### Simulate
#imp_mod.set_iapp( I_app/10. )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
#ta, ya, yda = imp_sim.simulate(0.1,5)
##
#imp_mod.set_iapp( I_app/2. )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
#tb, yb, ydb = imp_sim.simulate(0.2,5)
#imp_mod.set_iapp( I_app )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
## Sim step 1
#t1, y1, yd1 = imp_sim.simulate(1./Crate*3600.*0.2,100)
imp_sim.display_progress = False
imp_sim.verbosity = 50
imp_sim.report_continuously = True
imp_sim.time_limit = 10.
### Simulate
t01, t02 = 0.1, 0.2
imp_mod.set_iapp( I_app/10. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
ta, ya, yda = imp_sim.simulate(t01,2)
imp_mod.set_iapp( I_app/2. )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
tb, yb, ydb = imp_sim.simulate(t02,2)
print 'yb shape', yb.shape
# Sim step 1
#imp_mod.set_iapp( I_app )
#imp_sim.make_consistent('IDA_YA_YDP_INIT')
#t1, y1, yd1 = imp_sim.simulate(1.0/Crate*3600.0,100)
NT = 30
time = numpy.linspace( t02+0.1, 1.0/Crate*3600.0*0.3, NT )#numpy.linspace( t02+0.1, 60., NT ) #
t_out = [ 0 for ts in time ]
V_out = [ 0 for ts in time ]
y_out = numpy.zeros( [len(time), yb.shape[ 1]] )
yd_out = numpy.zeros( [len(time), ydb.shape[1]] )
print 'y_out.shape', y_out.shape
it = 0
V_cell = imp_mod.get_voltage( yb[-1,:].flatten() )
ce_now = yb[-1,imp_mod.ce_inds].flatten()
print 'V_cell prior to time loop:', V_cell
imp_mod.set_iapp( I_app )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
sim_stopped = 0
while V_cell > Vcut and max(ce_now)<max(ce_lims) and min(ce_now)>min(ce_lims) and not sim_stopped and it<len(time) :
try :
ti, yi, ydi = imp_sim.simulate(time[it],1)
except :
ti = [t_out[it-1],t_out[it-1]]
yi = y_out[ it-2:it,:]
ydi = yd_out[ it-2:it,:]
sim_stopped = 1
print 'Sim stopped due to time integration failure.'
t_out[ it] = ti[ -1 ]
y_out[ it,:] = yi[ -1,:]
yd_out[it,:] = ydi[-1,:]
V_cell = imp_mod.get_voltage( y_out[it,:] )
V_out[it] = V_cell
ce_now = y_out[it,imp_mod.ce_inds]
print 'time:',round(t_out[it],3), ' | Voltage:', round(V_cell,3)
if V_cell < Vcut :
print '\n','Vcut stopped simulation.'
elif max(ce_now)>max(ce_lims) :
print '\n','ce max stopped simulation.'
elif min(ce_now)<min(ce_lims) :
print '\n','ce min stopped simulation.'
it+=1
if it < len(time) :
t_out = t_out[ :it ]
V_out = V_out[ :it ]
y_out = y_out[ :it,:]
yd_out = yd_out[:it,:]
ce = y_out[:,imp_mod.ce_inds]
f,ax=plt.subplots(1,2)
ax[0].plot( imp_mod.x_m, ce.T )
ax[1].plot( t_out, V_out )
plt.show()
t1 = t_out
y1 = y_out
yd1 = yd_out
imp_mod.set_iapp( 0.0 )
imp_sim.make_consistent('IDA_YA_YDP_INIT')
# Sim step 1
t2, y2, yd2 = imp_sim.simulate(t1[-1]*1.5,100)
c_avg_0 = numpy.mean( imp_mod.eps_m*y0[:N] )
c_avg_f = numpy.mean( imp_mod.eps_m*y2[-1,:N] )
print c_avg_0
print c_avg_f
# extract variables
im = imp_mod
ce_1 = y1[:,im.ce_inds]
ca_1 = y1[:,im.csa_inds]
cc_1 = y1[:,im.csc_inds]
pe_1 = y1[:,im.pe_inds]
pa_1 = y1[:,im.pa_inds]
pc_1 = y1[:,im.pc_inds]
ja_1 = y1[:,im.ja_inds]
jc_1 = y1[:,im.jc_inds]
ce_2 = y2[:,im.ce_inds]
ca_2 = y2[:,im.csa_inds]
cc_2 = y2[:,im.csc_inds]
pe_2 = y2[:,im.pe_inds]
pa_2 = y2[:,im.pa_inds]
pc_2 = y2[:,im.pc_inds]
ja_2 = y2[:,im.ja_inds]
jc_2 = y2[:,im.jc_inds]
Jsum_a1 = numpy.array( [ sum(imp_mod.vols_a*imp_mod.F*imp_mod.as_a*ja_1[i,:]) for i in range(len(ja_1[:,0])) ] )
Jsum_c1 = numpy.array( [ sum(imp_mod.vols_c*imp_mod.F*imp_mod.as_c*jc_1[i,:]) for i in range(len(jc_1[:,0])) ] )
plt.figure()
plt.plot( t1, Jsum_a1-Jsum_c1 )
#Plot
# t1
# Plot through space
f, ax = plt.subplots(2,5)
# ce vs x
ax[0,0].plot(imp_mod.x_m*1e6,ce_1.T)
# pe vs x
ax[0,1].plot(imp_mod.x_m*1e6,pe_1.T)
# pa vs x
ax[0,2].plot(imp_mod.x_m_a*1e6,pa_1.T)
# pc vs x
ax[0,2].plot(imp_mod.x_m_c*1e6,pc_1.T)
# ca vs x
ax[0,3].plot(imp_mod.x_m_a*1e6,ca_1.T)
# cc vs x
ax[0,3].plot(imp_mod.x_m_c*1e6,cc_1.T)
# ja vs x
ax[0,4].plot(imp_mod.x_m_a*1e6,ja_1.T)
# jc vs x
ax[0,4].plot(imp_mod.x_m_c*1e6,jc_1.T)
ax[0,0].set_title('t1 c')
ax[0,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0,1].set_title('t1 p')
ax[0,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,1].set_ylabel('E-lyte Potential [V]')
ax[0,2].set_title('t1 p solid')
ax[0,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,2].set_ylabel('Solid Potential [V]')
ax[0,3].set_title('t1 conc solid')
ax[0,3].set_xlabel('Cell Thickness [$\mu$m]')
ax[0,3].set_ylabel('Solid Conc. [mol/m$^3$]')
# t2
ax[1,0].plot(imp_mod.x_m*1e6,ce_2.T)
ax[1,1].plot(imp_mod.x_m*1e6,pe_2.T)
ax[1,2].plot(imp_mod.x_m_a*1e6,pa_2.T)
ax[1,2].plot(imp_mod.x_m_c*1e6,pc_2.T)
ax[1,3].plot(imp_mod.x_m_a*1e6,ca_2.T)
ax[1,3].plot(imp_mod.x_m_c*1e6,cc_2.T)
ax[1,4].plot(imp_mod.x_m_a*1e6,ja_2.T)
ax[1,4].plot(imp_mod.x_m_c*1e6,jc_2.T)
ax[1,0].set_title('t2 c')
ax[1,0].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[1,1].set_title('t2 p e-lyte')
ax[1,1].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,1].set_ylabel('E-lyte Potential [V]')
ax[1,2].set_title('t2 p solid')
ax[1,2].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,2].set_ylabel('Solid Potential [V]')
ax[1,3].set_title('t2 Solid Conc.')
ax[1,3].set_xlabel('Cell Thickness [$\mu$m]')
ax[1,3].set_ylabel('Solid Conc. [mol/m$^3$]')
plt.tight_layout()
# Plot through time
f, ax = plt.subplots(1,4)
ax[0].plot(t1,ce_1)
ax[1].plot(t1,pe_1)
ax[2].plot(t1,pa_1)
ax[2].plot(t1,pc_1)
ax[3].plot(t1,ca_1)
ax[3].plot(t1,cc_1)
ax[0].plot(t2,ce_2)
ax[1].plot(t2,pe_2)
ax[2].plot(t2,pa_2)
ax[2].plot(t2,pc_2)
ax[3].plot(t2,ca_2)
ax[3].plot(t2,cc_2)
ax[0].set_ylabel('E-lyte Conc. [mol/m$^3$]')
ax[0].set_xlabel('Time [s]')
ax[1].set_ylabel('E-lyte Potential [V]')
ax[1].set_xlabel('Time [s]')
ax[2].set_ylabel('Solid Potential [V]')
ax[2].set_xlabel('Time [s]')
ax[3].set_ylabel('Solid Conc. [mol/m$^3$]')
ax[3].set_xlabel('Time [s]')
plt.tight_layout()
plt.figure()
plt.plot( t1, pc_1[:,-1] - pa_1[:,0] )
plt.plot( t2, pc_2[:,-1] - pa_2[:,0] )
plt.show()
#imp_mod = MyProblem(Na,Ns,Nc,X,cell_coated_area,bsp_dir,y0,yd0,'Example using an analytic Jacobian')
## my own time solver
#delta_t = 1.0
#tf = 100.
#time = [ i*delta_t for i in range(int(tf/delta_t)+1) ]
#print time
#x_out = numpy.zeros( [imp_mod.N+imp_mod.Na+imp_mod.Nc, len(time)] )
#z_out = numpy.zeros( [imp_mod.Na+imp_mod.Nc+imp_mod.N+imp_mod.Na+imp_mod.Nc, len(time)] )
#x_out[:,0] = numpy.concatenate( [c_centered, ca_centered, cc_centered] )
#z_out[:,0] = numpy.concatenate( [ja, jc, p_centered, pa_centered, pc_centered] )
#for it, t in enumerate(time[1:]) :
# if it == 0 :
# Cur_vec = [ 0.0, 0.0, 0.1*I_app ]
# elif it == 1 :
# Cur_vec = [ 0.0, 0.1*I_app, 0.5*I_app ]
# elif it == 2 :
# Cur_vec = [ 0.1*I_app, 0.5*I_app, I_app ]
# else :
# Cur_vec = [ I_app, I_app, I_app ]
#
# x_out[:,it+1], z_out[:,it+1], newtonStats = imp_mod.cn_solver( x_out[:,it], z_out[:,it], Cur_vec, delta_t )
#plt.close()
#f, ax = plt.subplots(1,3)
#ax[0].plot( imp_mod.x_m, x_out[:imp_mod.N] )
#ax[1].plot( imp_mod.x_m, z_out[imp_mod.Na+imp_mod.Nc:imp_mod.Na+imp_mod.Nc+imp_mod.N,:-1] )
#ax[2].plot( imp_mod.x_m_a, z_out[-imp_mod.Na-imp_mod.Nc:-imp_mod.Nc,:-1] )
#ax[2].plot( imp_mod.x_m_c, z_out[-imp_mod.Nc:,:-1] )
#plt.show()
#print z_out
# def dae_system_num( self, y ) :
# self.set_iapp( self.Input )
# ## Parse out the states
# # E-lyte conc
# ce = y[numpy.ix_( self.ce_inds)]
# # Solid conc a:anode, c:cathode
# csa = y[numpy.ix_( self.csa_inds)]
# csc = y[numpy.ix_( self.csc_inds)]
# # Reaction (Butler-Volmer Kinetics)
# ja_rxn = y[numpy.ix_(self.ja_inds)]
# jc_rxn = y[numpy.ix_(self.jc_inds)]
# # E-lyte potential
# phi = y[numpy.ix_(self.pe_inds)]
# # Solid potential
# phi_s_a = y[numpy.ix_(self.pa_inds)]
# phi_s_c = y[numpy.ix_(self.pc_inds)]
# ## Grab state dependent matrices
# # For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
# A_ce = self.build_Ace_mat( ce )
# A_pe = self.build_Ape_mat( ce )
# B_pe = self.build_Bpe_mat( ce )
# ## Compute extra variables
# # For the reaction kinetics
# csa_ss = csa + (self.D_cs_a.dot( ja_rxn ).flatten()) # anode surface conc
# csc_ss = csc + (self.D_cs_c.dot( jc_rxn ).flatten()) # cathode surface conc
# xa = csa /self.csa_max
# xc = csc /self.csc_max
# xa_ss = csa_ss/self.csa_max
# xc_ss = csc_ss/self.csc_max
#
# Uref_a = self.uref_a_interp( xa_ss ) # anode equilibrium potential
# Uref_c = self.uref_c_interp( xc_ss ) # cathode equilibrium potential
# eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
# eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
# ja = 2.0*self.io_a/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
# jc = 2.0*self.io_c/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
# j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
# ## Compute the residuals
# # Time deriv components
# r1 = ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
# r2 = (self.B_cs_a.dot(ja_rxn).flatten()) # Anode conc
# r3 = (self.B_cs_c.dot(jc_rxn).flatten()) # Cathode conc
#
# # Algebraic components
# r4 = ja_rxn - ja
# r5 = jc_rxn - jc
# r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
# r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential
# r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
# res_out = numpy.concatenate( [r1,r2,r3, r4, r5, r6, r7, r8] )
# return res_out
# def dae_system( self, x, z, Input, get_mats=0 ) :
# self.set_iapp( Input )
# y = numpy.concatenate([x,z])
# ## Parse out the states
# # E-lyte conc
# ce = y[ self.ce_inds]
# # Solid conc a:anode, c:cathode
# csa = y[ self.csa_inds]
# csc = y[ self.csc_inds]
# # Reaction (Butler-Volmer Kinetics)
# ja_rxn = y[self.ja_inds]
# jc_rxn = y[self.jc_inds]
# # E-lyte potential
# phi = y[self.pe_inds]
# # Solid potential
# phi_s_a = y[self.pa_inds]
# phi_s_c = y[self.pc_inds]
# ## Grab state dependent matrices
# # For E-lyte conc and potential (i.e., De(ce), kapp_e(ce))
# A_ce = self.build_Ace_mat( ce )
# A_pe = self.build_Ape_mat( ce )
# B_pe = self.build_Bpe_mat( ce )
# ## Compute extra variables
# # For the reaction kinetics
# csa_ss = csa + (self.D_cs_a.dot( ja_rxn ).flatten()) # anode surface conc
# csc_ss = csc + (self.D_cs_c.dot( jc_rxn ).flatten()) # cathode surface conc
# xa = csa /self.csa_max
# xc = csc /self.csc_max
# xa_ss = csa_ss/self.csa_max
# xc_ss = csc_ss/self.csc_max
# Uref_a = self.uref_a_interp( xa_ss ) # anode equilibrium potential
# Uref_c = self.uref_c_interp( xc_ss ) # cathode equilibrium potential
# eta_a = phi_s_a - phi[:self.Na] - Uref_a # anode overpotential
# eta_c = phi_s_c - phi[-self.Nc:] - Uref_c # cathode overpotential
## ja = 2.0*self.io_a * numpy.sqrt( ce[:self.Na]/self.ce_nom * (1.0 - csa_ss/self.csa_max) * (csa_ss/self.csa_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_a )
## jc = 2.0*self.io_c * numpy.sqrt( ce[-self.Nc:]/self.ce_nom * (1.0 - csc_ss/self.csc_max) * (csc_ss/self.csc_max) ) * numpy.sinh( self.R_gas/(2.0*self.F*self.T)*eta_c )
# ja = 2.0*self.io_a/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_a )
# jc = 2.0*self.io_c/self.F * numpy.sinh( 0.5*self.F/(self.R_gas*self.T)*eta_c )
# j = numpy.concatenate( [ ja_rxn, numpy.zeros(self.Ns), jc_rxn ] )
## plt.figure()
## plt.plot( self.x_m, j )
## plt.show()
# ## Compute the residuals
# # Time deriv components
# r1 = ( ((A_ce.dot(ce)).flatten() + (self.B_ce.dot(j)).flatten()) ) # E-lyte conc
# r2 = (self.B_cs_a.dot(ja_rxn).flatten()) # Anode conc
# r3 = (self.B_cs_c.dot(jc_rxn).flatten()) # Cathode conc
#
# # Algebraic components
# r4 = ja_rxn - ja
# r5 = jc_rxn - jc
# r6 = A_pe.dot(phi).flatten() - B_pe.dot(ce).flatten() + self.B2_pe.dot(j).flatten() # E-lyte potential
# r7 = self.A_ps_a.dot(phi_s_a).flatten() - self.B_ps_a.dot(ja_rxn).flatten() - self.B2_ps_a*self.i_app # Anode potential
# r8 = self.A_ps_c.dot(phi_s_c).flatten() - self.B_ps_c.dot(jc_rxn).flatten() + self.B2_ps_c*self.i_app # Cathode potential
# if get_mats :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6, r7, r8] ), { 'A_ce':A_ce, 'A_pe':A_pe, 'B_pe':B_pe, 'csa':csa, 'csc':csc, 'csa_ss':csa_ss, 'csc_ss':csc_ss, 'xa':xa, 'xc':xc, 'xa_ss':xa_ss, 'xc_ss':xc_ss, 'eta_a':eta_a, 'eta_c':eta_c }
# else :
# res_out = numpy.concatenate( [r1,r2,r3] ), numpy.concatenate( [r4, r5, r6, r7, r8] )
# return res_out
# def jac_system( self, mats ) :
# A_ce = mats['A_ce'] #self.build_Ace_mat( ce )
# A_pe = mats['A_pe'] #self.build_Ape_mat( ce )
# B_pe = mats['B_pe'] #self.build_Bpe_mat( ce )
# Bjac_a = self.build_Bjac_mat( mats['eta_a'], 2.0*self.io_a/self.F, 0.5*self.F/(self.R_gas*self.T) )
# Bjac_c = self.build_Bjac_mat( mats['eta_c'], 2.0*self.io_c/self.F, 0.5*self.F/(self.R_gas*self.T) )
# DUDcsa_ss = numpy.diag( (1.0/self.csa_max)*self.duref_a_interp(mats['xa_ss']) )
# DUDcsc_ss = numpy.diag( (1.0/self.csc_max)*self.duref_c_interp(mats['xc_ss']) )
# Bja = Bjac_a.dot(-1.0*DUDcsa_ss.dot(self.D_cs_a))
# Bjc = Bjac_c.dot(-1.0*DUDcsc_ss.dot(self.D_cs_c))
# A_ja = numpy.diag(numpy.ones(self.Na)) - Bja
# A_jc = numpy.diag(numpy.ones(self.Nc)) - Bjc
# ##
# fx = scipy.linalg.block_diag( A_ce, numpy.zeros([self.Na,self.Na]), numpy.zeros([self.Nc,self.Nc]) )
# ##
# ##
# fz = numpy.zeros( [self.N+self.Na+self.Nc, self.Na+self.Nc + self.N+self.Na+self.Nc] )
# # ce vs j
# fz[ numpy.ix_(range(self.N), range(self.Na)) ] = self.B_ce[:, :self.Na]
# fz[ numpy.ix_(range(self.N), range(self.Na,self.Na+self.Nc)) ] = self.B_ce[:, -self.Nc:]
# # cs vs j
# fz[ numpy.ix_(range(self.N,self.N+self.Na), range(self.Na)) ] = self.B_cs_a
# fz[ numpy.ix_(range(self.N+self.Na,self.N+self.Na+self.Nc), range(self.Na,self.Na+self.Nc)) ] = self.B_cs_c
# ##
# ##
# gx = numpy.zeros( [self.Na+self.Nc + self.N+self.Na+self.Nc, self.N+self.Na+self.Nc] )
# # j vs cs_bar
# gx[numpy.ix_(range(self.Na),range(self.N,self.N+self.Na))] = -Bjac_a.dot(-1.0*DUDcsa_ss*1.0)
# gx[numpy.ix_(range(self.Na,self.Na+self.Nc),range(self.N+self.Na,self.N+self.Na+self.Nc))] = -Bjac_c.dot(-1.0*DUDcsc_ss*1.0)
# # phi_e vs ce
# gx[numpy.ix_(range(self.Na+self.Nc,self.Na+self.Nc+self.N),range(self.N))] = -B_pe
# ##
# ##
# # z vs z
# gz0 = scipy.linalg.block_diag( A_ja, A_jc, A_pe, self.A_ps_a, self.A_ps_c )
# # z cross coupling
# gz00 = numpy.zeros_like( gz0 )
# # phi_e vs j
# gz00[ numpy.ix_(range(self.Na+self.Nc,self.Na+self.Nc+self.N),range(self.Na)) ] = self.B2_pe[:,:self.Na]
# gz00[ numpy.ix_(range(self.Na+self.Nc,self.Na+self.Nc+self.N),range(self.Na,self.Na+self.Nc)) ] = self.B2_pe[:,-self.Nc:]
# # phi_s vs j
# gz00[ numpy.ix_(range(self.Na+self.Nc+self.N, self.Na+self.Nc+self.N +self.Na),range(self.Na)) ] = -self.B_ps_a
# gz00[ numpy.ix_(range(self.Na+self.Nc+self.N+self.Na,self.Na+self.Nc+self.N+self.Na+self.Nc),range(self.Na,self.Na+self.Nc)) ] = -self.B_ps_c
# # j vs phi_s
# gz00[ numpy.ix_(range(self.Na), range(self.Na+self.Nc+self.N,self.Na+self.Nc+self.N+self.Na)) ] = -Bjac_a*( 1.0)
# gz00[ numpy.ix_(range(self.Na,self.Na+self.Nc),range(self.Na+self.Nc+self.N+self.Na,self.Na+self.Nc+self.N+self.Na+self.Nc)) ] = -Bjac_c*( 1.0)
# # j vs phi_e
# gz00[ numpy.ix_(range(self.Na), range(self.Na+self.Nc,self.Na+self.Nc+self.Na)) ] = -Bjac_a*(-1.0)
# gz00[ numpy.ix_(range(self.Na,self.Na+self.Nc), range(self.Na+self.Nc+self.Na+self.Ns,self.Na+self.Nc+self.N)) ] = -Bjac_c*(-1.0)
# gz = gz0 + gz00
# return fx, fz, gx, gz
# def cn_solver( self, x, z, Cur_vec, delta_t ) :
# """
# Crank-Nicholson solver for marching through time
# """
# Cur_prev, Cur, Cur_nxt = Cur_vec[0], Cur_vec[1], Cur_vec[2]
# maxIters = 10
# tol = 1e-4
# Nx = self.N+self.Na+self.Nc
# Nz = self.Na + self.Nc + self.N + self.Na + self.Nc
# x_nxt = numpy.zeros( (Nx,maxIters), dtype='d' )
# z_nxt = numpy.zeros( (Nz,maxIters), dtype='d' )
# relres = numpy.zeros( maxIters, dtype='d' )
# relres[0] = 1.0
# var_flag = {'lim_on':0}
# # Solve for consistent ICs
# if Cur != Cur_prev :
# z_cons = numpy.zeros( (Nz, maxIters), dtype='d' )
# z_cons[:,0] = deepcopy(z)
# junk_f, g, mats = self.dae_system( x, z, Cur, get_mats=1 )
# for idx in range(maxIters-1) :
# (junk_fx, junk_fz, junk_gx, g_z) = self.jac_system( mats )
# Delta_z = -sparseSolve( sparseMat(g_z), g )
# z_cons[:,idx+1] = z_cons[:,idx] + Delta_z
# relres_z = numpy.linalg.norm(Delta_z,numpy.inf) / numpy.linalg.norm(z,numpy.inf)
# if relres_z < tol :
# break
# elif idx == maxIters-1 :
# print(('Warning: Max Newton iterations reached for consistency | RelChange=',relres_z*100.0))
# z = z_cons[:,idx+1]
# #print Cur
# f, g = self.dae_system( deepcopy(x), deepcopy(z), Cur )
# x_nxt[:,0] = deepcopy(x)
# z_nxt[:,0] = deepcopy(z)
#
# # plt.figure(1)
# # plt.plot( x_nxt[:,0] )
# # plt.plot( z_nxt[:,0] )
# # plt.show()
# for idx in range(maxIters-1) :
# f_nxt, g_nxt, mats = self.dae_system( x_nxt[:,idx], z_nxt[:,idx], Cur_nxt, get_mats=1 )
## print 'x:',x.shape
## print 'xnxt:',x_nxt[:,idx].shape
## print 'f:',f.shape
## print 'fnxt:',f_nxt.shape
## print 'z:', z.shape
## print 'g:', g.shape
## print 'znxt:', z_nxt[:,idx].shape
## print 'gnxt:', g_nxt.shape
# F1 = x - x_nxt[:,idx] + delta_t/2.*( f+f_nxt )
# F2 = g_nxt
# F = numpy.concatenate( (F1, F2), axis=0 )
# fx, fz, gx, gz = self.jac_system( mats )
# jmat = numpy.concatenate( (numpy.concatenate( (fx, fz), axis=1 ),
# numpy.concatenate( (gx, gz), axis=1 )) )
## self.Input = Cur_nxt
## jmat_num = compute_deriv( self.dae_system_num, numpy.concatenate( (x_nxt[:,idx], z_nxt[:,idx]) ) )
## fx_num = jmat_num[:self.num_diff_vars,:self.num_diff_vars]
## fz_num = jmat_num[:self.num_diff_vars,self.num_diff_vars:]
## gx_num = jmat_num[self.num_diff_vars:,:self.num_diff_vars]
## gz_num = jmat_num[self.num_diff_vars:,self.num_diff_vars:]
## F1x_num = -sparse.eye(len(x)) + delta_t/2. * fx_num
## F1z_num = delta_t/2. * fz_num
# F1_x = -sparse.eye(len(x)) + delta_t/2. * fx
# F1_z = delta_t/2. * fz
# F2_x = gx
# F2_z = gz
# J = numpy.concatenate( (numpy.concatenate( (F1_x, F1_z), axis=1 ),
# numpy.concatenate( (F2_x, F2_z), axis=1 )) )
## Jnum = numpy.concatenate( (numpy.concatenate( (F1x_num, F1z_num), axis=1 ),
## numpy.concatenate( (gx_num , gz_num ), axis=1 )) )
# Jsp = sparseMat( J )
## Jspnum = sparseMat( Jnum )
## Delta_y = -sparseSolve( Jspnum, F )
# Delta_y = -sparseSolve( Jsp, F )
# x_nxt[:,idx+1] = x_nxt[:,idx] + Delta_y[:Nx]
# z_nxt[:,idx+1] = z_nxt[:,idx] + Delta_y[Nx:]
# # plt.figure(1)
# # plt.plot(Delta_y)
# # plt.figure(2)
# # plt.plot(x_nxt[:,idx])
# # plt.plot(x_nxt[:,idx+1])
#
## plt.show()
# y = numpy.concatenate( (x_nxt[:,idx+1], z_nxt[:,idx+1]), axis=0 )
# relres[idx+1] = numpy.linalg.norm( Delta_y, numpy.inf ) / numpy.linalg.norm( y, numpy.inf )
# if (relres[idx+1]<tol) and (numpy.linalg.norm(F, numpy.inf)<tol) :
# break
# elif idx==maxIters-1 :
# print( ('Warning: Max Newton iterations reached in main CN loop | RelChange = ',relres[-1]*100.0) )
# x_nxtf = x_nxt[:,idx+1]
# z_nxtf = z_nxt[:,idx+1]
# newtonStats = {'var_flag':var_flag}
# newtonStats['iters'] = idx
# newtonStats['relres'] = relres
## jm1_sp = sps.csr_matrix(jmat)
## jm2_sp = sps.csr_matrix(jmat_num)
## fig, ax = plt.subplots(1,2)
## ax[0].spy( jm1_sp )
## ax[1].spy( jm2_sp )
## plt.show()
# return x_nxtf, z_nxtf, newtonStats
|
gpl-3.0
|
volodymyrss/3ML
|
threeML/utils/time_series/time_series.py
|
1
|
24288
|
__author__='grburgess'
import collections
import copy
import os
import numpy as np
import pandas as pd
from pandas import HDFStore
from threeML.config.config import threeML_config
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import sanitize_filename
from threeML.io.progress_bar import progress_bar
from threeML.io.rich_display import display
from threeML.utils.binner import TemporalBinner
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.polynomial import polyfit, unbinned_polyfit, Polynomial
from threeML.plugins.OGIP.response import InstrumentResponse
from threeML.plugins.spectrum.binned_spectrum import Quality
class ReducingNumberOfThreads(Warning):
pass
class ReducingNumberOfSteps(Warning):
pass
class OverLappingIntervals(RuntimeError):
pass
# find out how many splits we need to make
def ceildiv(a, b):
return -(-a // b)
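# Editorial example (not part of the original module): ceildiv performs
# integer division rounded up, e.g. ceildiv(7, 2) == 4 while 7 // 2 == 3,
# which is the behaviour needed when computing how many splits are required.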
class TimeSeries(object):
def __init__(self, start_time,stop_time, n_channels ,native_quality=None,
first_channel=1, ra=None, dec=None, mission=None, instrument=None, verbose=True):
"""
The EventList is a container for event data that is tagged in time and in PHA/energy. It handles event selection,
temporal polynomial fitting, temporal binning, and exposure calculations (in subclasses). Once events are selected
        and/or polynomials are fit, the selections can be extracted via a PHAContainer which can be read by an OGIPLike
instance and translated into a PHA instance.
:param n_channels: Number of detector channels
:param start_time: start time of the event list
:param stop_time: stop time of the event list
:param first_channel: where detchans begin indexing
:param rsp_file: the response file corresponding to these events
:param arrival_times: list of event arrival times
:param energies: list of event energies or pha channels
:param native_quality: native pha quality flags
:param mission:
:param instrument:
:param verbose:
:param ra:
:param dec:
"""
self._verbose = verbose
self._n_channels = n_channels
self._first_channel = first_channel
self._native_quality = native_quality
# we haven't made selections yet
self._time_intervals = None
self._poly_intervals = None
self._counts = None
self._poly_counts = None
self._poly_count_err= None
if native_quality is not None:
assert len(native_quality) == n_channels, "the native quality has length %d but you specified there were %d channels"%(len(native_quality), n_channels)
self._start_time = start_time
self._stop_time = stop_time
# name the instrument if there is not one
if instrument is None:
custom_warnings.warn('No instrument name is given. Setting to UNKNOWN')
self._instrument = "UNKNOWN"
else:
self._instrument = instrument
if mission is None:
custom_warnings.warn('No mission name is given. Setting to UNKNOWN')
self._mission = "UNKNOWN"
else:
self._mission = mission
self._user_poly_order = -1
self._time_selection_exists = False
self._poly_fit_exists = False
self._fit_method_info = {"bin type": None, 'fit method': None}
def set_active_time_intervals(self, *args):
raise RuntimeError("Must be implemented in subclass")
@property
def poly_fit_exists(self):
return self._poly_fit_exists
@property
def n_channels(self):
return self._n_channels
@property
def poly_intervals(self):
return self._poly_intervals
@property
def polynomials(self):
""" Returns polynomial is they exist"""
if self._poly_fit_exists:
return self._polynomials
else:
            raise RuntimeError('A polynomial fit has not been made.')
def get_poly_info(self):
"""
        Return a pandas Panel with the polynomial coefficients
        and their errors
        Returns:
        a pandas Panel with 'coefficients' and 'error' items
"""
if self._poly_fit_exists:
coeff = []
err = []
for poly in self._polynomials:
coeff.append(poly.coefficients)
err.append(poly.error)
df_coeff = pd.DataFrame(coeff)
df_err = pd.DataFrame(err)
# print('Coefficients')
#
# display(df_coeff)
#
# print('Coefficient Error')
#
# display(df_err)
pan = pd.Panel({'coefficients': df_coeff, 'error': df_err})
return pan
else:
            raise RuntimeError('A polynomial fit has not been made.')
def get_total_poly_count(self, start, stop, mask=None):
"""
Get the total poly counts
:param start:
:param stop:
:return:
"""
if mask is None:
            mask = np.ones_like(self._polynomials, dtype=bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral(start, stop)
return total_counts
def get_total_poly_error(self, start, stop, mask=None):
"""
Get the total poly error
:param start:
:param stop:
:return:
"""
if mask is None:
            mask = np.ones_like(self._polynomials, dtype=bool)
total_counts = 0
for p in np.asarray(self._polynomials)[mask]:
total_counts += p.integral_error(start, stop) ** 2
return np.sqrt(total_counts)
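    # Editorial note: the two helpers above integrate the fitted background
    # polynomials over [start, stop]; the error version combines the
    # per-channel integral errors in quadrature (sqrt of the sum of squares),
    # which implicitly assumes the channel fits are independent.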
@property
def bins(self):
if self._temporal_binner is not None:
return self._temporal_binner
else:
raise RuntimeError('This EventList has no binning specified')
def bin_by_significance(self, start, stop, sigma, mask=None, min_counts=1):
"""
Interface to the temporal binner's significance binning model
:param start: start of the interval to bin on
        :param stop: stop of the interval to bin on
:param sigma: sigma-level of the bins
:param mask: (bool) use the energy mask to decide on significance
:param min_counts: minimum number of counts per bin
:return:
"""
if mask is not None:
# create phas to check
phas = np.arange(self._first_channel, self._n_channels)[mask]
            this_mask = np.zeros_like(self._arrival_times, dtype=bool)
for channel in phas:
this_mask = np.logical_or(this_mask, self._energies == channel)
events = self._arrival_times[this_mask]
else:
events = copy.copy(self._arrival_times)
events = events[np.logical_and(events <= stop, events >= start)]
tmp_bkg_getter = lambda a, b: self.get_total_poly_count(a, b, mask)
tmp_err_getter = lambda a, b: self.get_total_poly_error(a, b, mask)
# self._temporal_binner.bin_by_significance(tmp_bkg_getter,
# background_error_getter=tmp_err_getter,
# sigma_level=sigma,
# min_counts=min_counts)
self._temporal_binner = TemporalBinner.bin_by_significance(events,
tmp_bkg_getter,
background_error_getter=tmp_err_getter,
sigma_level=sigma,
min_counts=min_counts)
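    # Usage sketch (editorial; ``ts`` stands for an instance of a concrete
    # TimeSeries subclass with events loaded and a background fit made):
    #
    #     ts.bin_by_significance(0.0, 50.0, sigma=5.0, min_counts=10)
    #     print(ts.bins)
    #
    # If ``mask`` is given it should be a boolean array over the energy
    # channels used to evaluate the significance.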
def bin_by_constant(self, start, stop, dt=1):
"""
Interface to the temporal binner's constant binning mode
:param start: start time of the bins
:param stop: stop time of the bins
:param dt: temporal spacing of the bins
:return:
"""
events = self._arrival_times[np.logical_and(self._arrival_times >= start, self._arrival_times <= stop)]
self._temporal_binner = TemporalBinner.bin_by_constant(events, dt)
def bin_by_custom(self, start, stop):
"""
Interface to temporal binner's custom bin mode
:param start: start times of the bins
:param stop: stop times of the bins
:return:
"""
self._temporal_binner = TemporalBinner.bin_by_custom(start, stop)
#self._temporal_binner.bin_by_custom(start, stop)
def bin_by_bayesian_blocks(self, start, stop, p0, use_background=False):
events = self._arrival_times[np.logical_and(self._arrival_times >= start, self._arrival_times <= stop)]
#self._temporal_binner = TemporalBinner(events)
if use_background:
integral_background = lambda t: self.get_total_poly_count(start, t)
self._temporal_binner = TemporalBinner.bin_by_bayesian_blocks(events,
p0,
bkg_integral_distribution=integral_background)
else:
self._temporal_binner = TemporalBinner.bin_by_bayesian_blocks(events,
p0)
def __set_poly_order(self, value):
""" Set poly order only in allowed range and redo fit """
assert type(value) is int, "Polynomial order must be integer"
assert -1 <= value <= 4, "Polynomial order must be 0-4 or -1 to have it determined"
self._user_poly_order = value
if self._poly_fit_exists:
print('Refitting background with new polynomial order (%d) and existing selections' % value)
if self._time_selection_exists:
self.set_polynomial_fit_interval(*self._poly_intervals.to_string().split(','), unbinned=self._unbinned)
else:
RuntimeError("This is a bug. Should never get here")
def ___set_poly_order(self, value):
""" Indirect poly order setter """
self.__set_poly_order(value)
def __get_poly_order(self):
""" get the poly order """
return self._optimal_polynomial_grade
def ___get_poly_order(self):
""" Indirect poly order getter """
return self.__get_poly_order()
poly_order = property(___get_poly_order, ___set_poly_order,
doc="Get or set the polynomial order")
@property
def time_intervals(self):
"""
the time intervals of the events
:return:
"""
return self._time_intervals
def exposure_over_interval(self, tmin, tmax):
""" calculate the exposure over a given interval """
raise RuntimeError("Must be implemented in sub class")
def counts_over_interval(self, start, stop):
"""
return the number of counts in the selected interval
:param start: start of interval
:param stop: stop of interval
:return:
"""
# this will be a boolean list and the sum will be the
# number of events
raise RuntimeError("Must be implemented in sub class")
def set_polynomial_fit_interval(self, *time_intervals, **options):
"""Set the time interval to fit the background.
Multiple intervals can be input as separate arguments
Specified as 'tmin-tmax'. Intervals are in seconds. Example:
set_polynomial_fit_interval("-10.0-0.0","10.-15.")
:param time_intervals: intervals to fit on
:param options:
"""
# Find out if we want to binned or unbinned.
# TODO: add the option to config file
if 'unbinned' in options:
unbinned = options.pop('unbinned')
assert type(unbinned) == bool, 'unbinned option must be True or False'
else:
# assuming unbinned
# could use config file here
# unbinned = threeML_config['ogip']['use-unbinned-poly-fitting']
unbinned = True
# we create some time intervals
poly_intervals = TimeIntervalSet.from_strings(*time_intervals)
# adjust the selections to the data
for time_interval in poly_intervals:
t1 = time_interval.start_time
t2 = time_interval.stop_time
if t1 < self._start_time:
custom_warnings.warn(
"The time interval %f-%f started before the first arrival time (%f), so we are changing the intervals to %f-%f" % (
t1, t2, self._start_time, self._start_time, t2))
t1 = self._start_time
if t2 > self._stop_time:
custom_warnings.warn(
"The time interval %f-%f ended after the last arrival time (%f), so we are changing the intervals to %f-%f" % (
t1, t2, self._stop_time, t1, self._stop_time))
t2 = self._stop_time
if (self._stop_time <= t1) or (t2 <= self._start_time):
custom_warnings.warn(
"The time interval %f-%f is out side of the arrival times and will be dropped" % (
t1, t2))
continue
# set the poly intervals as an attribute
self._poly_intervals = poly_intervals
# Fit the events with the given intervals
if unbinned:
self._unbinned = True # keep track!
self._unbinned_fit_polynomials()
else:
self._unbinned = False
self._fit_polynomials()
# we have a fit now
self._poly_fit_exists = True
if self._verbose:
print("%s %d-order polynomial fit with the %s method" % (
self._fit_method_info['bin type'], self._optimal_polynomial_grade, self._fit_method_info['fit method']))
print('\n')
# recalculate the selected counts
if self._time_selection_exists:
self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
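    # Usage sketch (editorial; interval strings follow the format shown in the
    # docstring above and are parsed by TimeIntervalSet.from_strings):
    #
    #     ts.set_polynomial_fit_interval("-10.0-0.0", "100.0-200.0",
    #                                    unbinned=True)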
def get_information_dict(self, use_poly=False):
"""
Return a PHAContainer that can be read by different builders
:param use_poly: (bool) choose to build from the polynomial fits
"""
if not self._time_selection_exists:
raise RuntimeError('No time selection exists! Cannot calculate rates')
if use_poly:
is_poisson = False
counts_err = self._poly_count_err
counts = self._poly_counts
rate_err = self._poly_count_err / self._exposure
rates = self._poly_counts / self._exposure
# removing negative counts
idx = counts < 0.
counts[idx] = 0.
counts_err[idx] = 0.
rates[idx] = 0.
rate_err[idx] = 0.
else:
is_poisson = True
counts_err = None
counts = self._counts
rates = self._counts / self._exposure
rate_err = None
if self._native_quality is None:
quality = np.zeros_like(counts, dtype=int)
else:
quality = self._native_quality
container_dict = {}
container_dict['instrument'] = self._instrument
container_dict['telescope'] = self._mission
container_dict['tstart'] = self._time_intervals.absolute_start_time
container_dict['telapse'] = self._time_intervals.absolute_stop_time - self._time_intervals.absolute_start_time
container_dict['channel'] = np.arange(self._n_channels) + self._first_channel
container_dict['counts'] = counts
container_dict['counts error'] = counts_err
container_dict['rates'] = rates
container_dict['rate error'] = rate_err
# check to see if we already have a quality object
if isinstance(quality, Quality):
container_dict['quality'] = quality
else:
container_dict['quality'] = Quality.from_ogip(quality)
# TODO: make sure the grouping makes sense
container_dict['backfile']='NONE'
container_dict['grouping'] = np.ones(self._n_channels)
container_dict['exposure'] = self._exposure
#container_dict['response'] = self._response
return container_dict
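    # Editorial note: the dictionary returned above mirrors OGIP-style PHA
    # keywords (counts, rates, quality, exposure, grouping, ...). A hedged
    # sketch of downstream use, with the builder left abstract:
    #
    #     info = ts.get_information_dict(use_poly=True)
    #     # info['counts'], info['rates'], info['quality'] etc. can then be
    #     # handed to whatever spectrum/PHA builder consumes this container.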
def __repr__(self):
"""
Examine the currently selected info as well other things.
"""
return self._output().to_string()
def _output(self):
info_dict = collections.OrderedDict()
for i, interval in enumerate(self.time_intervals):
info_dict['active selection (%d)' % (i + 1)] = interval.__repr__()
info_dict['active deadtime'] = self._active_dead_time
if self._poly_fit_exists:
for i, interval in enumerate(self.poly_intervals):
info_dict['polynomial selection (%d)' % (i + 1)] = interval.__repr__()
info_dict['polynomial order'] = self._optimal_polynomial_grade
info_dict['polynomial fit type'] = self._fit_method_info['bin type']
info_dict['polynomial fit method'] = self._fit_method_info['fit method']
return pd.Series(info_dict, index=info_dict.keys())
def _fit_global_and_determine_optimum_grade(self, cnts, bins, exposure):
"""
Provides the ability to find the optimum polynomial grade for *binned* counts by fitting the
total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
:param cnts: counts per bin
:param bins: the bins used
:param exposure: exposure per bin
:return: polynomial grade
"""
min_grade = 0
max_grade = 4
log_likelihoods = []
for grade in range(min_grade, max_grade + 1):
polynomial, log_like = polyfit(bins, cnts, grade, exposure)
log_likelihoods.append(log_like)
# Found the best one
        delta_loglike = np.array(list(map(lambda x: 2 * (x[0] - x[1]), zip(log_likelihoods[:-1], log_likelihoods[1:]))))
# print("\ndelta log-likelihoods:")
# for i in range(max_grade):
# print("%s -> %s: delta Log-likelihood = %s" % (i, i + 1, deltaLoglike[i]))
# print("")
delta_threshold = 9.0
mask = (delta_loglike >= delta_threshold)
if (len(mask.nonzero()[0]) == 0):
# best grade is zero!
best_grade = 0
else:
best_grade = mask.nonzero()[0][-1] + 1
return best_grade
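    # Editorial note on delta_threshold: twice the change in log-likelihood
    # between two nested models with one extra parameter is asymptotically
    # chi-square distributed with 1 degree of freedom, so a threshold of 9.0
    # corresponds roughly to a 3-sigma preference for the higher grade.
    # Sketch of the selection logic with made-up numbers:
    #
    #     deltas = np.array([25.0, 12.0, 1.5, 0.2])  # grade 0->1, 1->2, 2->3, 3->4
    #     mask = deltas >= 9.0
    #     best_grade = 0 if not mask.any() else mask.nonzero()[0][-1] + 1  # -> 2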
def _unbinned_fit_global_and_determine_optimum_grade(self, events, exposure):
"""
Provides the ability to find the optimum polynomial grade for *unbinned* events by fitting the
total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.
:param events: an event list
:param exposure: the exposure per event
:return: polynomial grade
"""
# Fit the sum of all the channels to determine the optimal polynomial
# grade
min_grade = 0
max_grade = 4
log_likelihoods = []
t_start = self._poly_intervals.start_times
t_stop = self._poly_intervals.stop_times
for grade in range(min_grade, max_grade + 1):
polynomial, log_like = unbinned_polyfit(events, grade, t_start, t_stop, exposure)
log_likelihoods.append(log_like)
# Found the best one
        delta_loglike = np.array(list(map(lambda x: 2 * (x[0] - x[1]), zip(log_likelihoods[:-1], log_likelihoods[1:]))))
delta_threshold = 9.0
mask = (delta_loglike >= delta_threshold)
if (len(mask.nonzero()[0]) == 0):
# best grade is zero!
best_grade = 0
else:
best_grade = mask.nonzero()[0][-1] + 1
return best_grade
def _fit_polynomials(self):
raise NotImplementedError('this must be implemented in a subclass')
def _unbinned_fit_polynomials(self):
raise NotImplementedError('this must be implemented in a subclass')
def save_background(self, filename, overwrite=False):
"""
        save the background to an HDF5 file
:param filename:
:return:
"""
# make the file name proper
filename = os.path.splitext(filename)
filename = "%s.h5" % filename[0]
filename_sanitized = sanitize_filename(filename)
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists!" % filename_sanitized)
with HDFStore(filename_sanitized) as store:
# extract the polynomial information and save it
if self._poly_fit_exists:
coeff = []
err = []
for poly in self._polynomials:
coeff.append(poly.coefficients)
err.append(poly.covariance_matrix)
df_coeff = pd.Series(coeff)
df_err = pd.Series(err)
else:
raise RuntimeError('the polynomials have not been fit yet')
df_coeff.to_hdf(store, 'coefficients')
df_err.to_hdf(store, 'covariance')
store.get_storer('coefficients').attrs.metadata = {'poly_order': self._optimal_polynomial_grade,
                                                               'poly_selections': list(zip(self._poly_intervals.start_times, self._poly_intervals.stop_times)),
'unbinned':self._unbinned,
'fit_method':self._fit_method_info['fit method']}
if self._verbose:
print("\nSaved fitted background to %s.\n"% filename)
def restore_fit(self, filename):
filename_sanitized = sanitize_filename(filename)
with HDFStore(filename_sanitized) as store:
coefficients = store['coefficients']
covariance = store['covariance']
self._polynomials = []
# create new polynomials
for i in range(len(coefficients)):
coeff = np.array(coefficients.loc[i])
# make sure we get the right order
# pandas stores the non-needed coeff
# as nans.
coeff = coeff[np.isfinite(coeff)]
cov = covariance.loc[i]
self._polynomials.append(Polynomial.from_previous_fit(coeff, cov))
metadata = store.get_storer('coefficients').attrs.metadata
self._optimal_polynomial_grade = metadata['poly_order']
poly_selections = np.array(metadata['poly_selections'])
self._poly_intervals = TimeIntervalSet.from_starts_and_stops(poly_selections[:,0],poly_selections[:,1])
self._unbinned = metadata['unbinned']
if self._unbinned:
self._fit_method_info['bin type'] = 'unbinned'
else:
self._fit_method_info['bin type'] = 'binned'
self._fit_method_info['fit method'] = metadata['fit_method']
# go thru and count the counts!
self._poly_fit_exists = True
if self._time_selection_exists:
self.set_active_time_intervals(*self._time_intervals.to_string().split(','))
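    # Usage sketch (editorial; ``ts`` is a concrete subclass instance on which
    # set_polynomial_fit_interval has already been called):
    #
    #     ts.save_background("my_background", overwrite=True)  # writes my_background.h5
    #     ts.restore_fit("my_background.h5")                   # reloads the polynomials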
def view_lightcurve(self, start=-10, stop=20., dt=1., use_binner=False):
raise NotImplementedError('must be implemented in subclass')
|
bsd-3-clause
|
henningjp/CoolProp
|
dev/TTSE/TimeComp.py
|
2
|
69624
|
#!/usr/bin/python
# -*- coding: ascii -*-
#
from __future__ import print_function, division
import os, sys
from os import path
import numpy as np
import CoolProp
import glob
from warnings import warn
from time import perf_counter as clock  # time.clock was removed in Python 3.8
import CoolProp.constants
from CoolProp.CoolProp import PropsSI, generate_update_pair, get_parameter_index, set_debug_level, get_phase_index
from CoolProp import AbstractState as State
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import copy
from itertools import cycle
from matplotlib import gridspec, ticker
#from jopy.dataPlotters import roundList, range_brace
def range_brace(x_min, x_max, mid=0.5,
beta1=50.0, beta2=100.0, height=1,
initial_divisions=11, resolution_factor=1.5):
"""
http://stackoverflow.com/questions/1289681/drawing-braces-with-pyx
x,y = range_brace(0, 100)
ax.plot(x, y,'-')
ax.plot(y, x,'-')
"""
    # determine x0 values adaptively using the second derivative
# could be replaced with less snazzy:
# x0 = NP.arange(0, 0.5, .001)
x0 = np.array(())
tmpx = np.linspace(0, 0.5, initial_divisions)
tmp = beta1**2 * (np.exp(beta1 * tmpx)) * (1 - np.exp(beta1 * tmpx)) / np.power((1 + np.exp(beta1 * tmpx)), 3)
tmp += beta2**2 * (np.exp(beta2 * (tmpx - 0.5))) * (1 - np.exp(beta2 * (tmpx - 0.5))) / np.power((1 + np.exp(beta2 * (tmpx - 0.5))), 3)
for i in range(0, len(tmpx) - 1):
t = int(np.ceil(resolution_factor * max(np.abs(tmp[i:i + 2])) / float(initial_divisions)))
x0 = np.append(x0, np.linspace(tmpx[i], tmpx[i + 1], t))
x0 = np.sort(np.unique(x0)) # sort and remove dups
# half brace using sum of two logistic functions
y0 = mid * 2 * ((1 / (1. + np.exp(-1 * beta1 * x0))) - 0.5)
y0 += (1 - mid) * 2 * (1 / (1. + np.exp(-1 * beta2 * (x0 - 0.5))))
# concat and scale x
x = np.concatenate((x0, 1 - x0[::-1])) * float((x_max - x_min)) + x_min
y = np.concatenate((y0, y0[::-1])) * float(height)
return (x, y)
# try:
#from jopy.dataPlotters import BasePlotter
#bp = BasePlotter()
# except:
#bp = None
bp = None
# The basic settings for the plots
xypoints = 1000
loops = 1
repeat = 1
runs = 0
maxruns = 5
plot = True
calc = True
check = True
folder = "dataTTSE"
figures = "figuresTTSE"
# np.random.seed(1984)
fluids = ["CO2", "Pentane", "R134a", "Water", "Air", "LiBr-0%"]
fluids = ["CO2", "Pentane", "R134a", "Water"]
fluids = ["Air"]
#glskeys = [r"\glsentryshort{co2}",r"\glsentryshort{pentane}",r"\glsentryshort{r134a}",r"\glsentryshort{water}",r"\glsentryshort{air}",r"\glsentryshort{libr} \SI{0}{\percent}"]
#glskeys = [r"\ce{CO2}",r"n-Ppentane",r"R134a",r"Water",r"Air",r"\glsentryshort{libr} \SI{0}{\percent}"]
repList = []
# for i in range(len(fluids)):
# repList.append(fluids[i])
# repList.append(glskeys[i])
#backends = ["INCOMP","HEOS","REFPROP"]
backends = ["HEOS", "REFPROP"]
backends = ["HEOS"]
# repList.append("HEOS")
# repList.append(r"\glsentryshort{cp}")
# repList.append("REFPROP")
# repList.append(r"\glsentryshort{rp}")
# CoolProp.CoolProp.set_debug_level(51)
pStr = path.dirname(path.abspath(__file__))
fStr = path.splitext(path.basename(__file__))[0]
def getFolderName():
folderName = path.join(pStr, folder)
if not path.isdir(folderName):
print("Creating data directory " + folderName)
os.makedirs(folderName)
return folderName
def getFigureFolder():
folderName = path.join(pStr, figures)
if not path.isdir(folderName):
print("Creating data directory " + folderName)
os.makedirs(folderName)
return folderName
repList.append("TimeComp-")
repList.append("chapters/FluidProperties/" + path.basename(getFigureFolder()) + "/TimeComp-")
def getFileName(qualifiers=[]):
fileName = path.join(getFolderName(), "-".join(qualifiers))
return fileName
# Some file handling
def loadNpzData(backend, fluid):
dicts = {}
globber = getFileName([backend, fluid]) + '_[0-9][0-9][0-9].npz'
for fname in glob.glob(globber):
dataDict = dict(np.load(fname))
dicts[str(dataDict["name"])] = dataDict
# if len(dicts)<1:
# #print("No readable file found for {0}".format(globber))
# dataDict = dict(name=str(0).zfill(3))
# dicts[str(dataDict["name"])] = dataDict
return dicts
def saveNpzData(backend, fluid, dicts, start=0, stop=-1):
    keys = sorted(dicts.keys())
for k in keys[start:stop]:
data = dicts[k]
fname = getFileName([backend, fluid]) + '_{0}.npz'.format(str(data['name']).zfill(3))
np.savez(fname, **data)
return True
def splitFluid(propsfluid):
fld = propsfluid.split("::")
if len(fld) == 2:
backend = fld[0]
fld = fld[1]
else:
backend = None
fld = fld[0]
fld = fld.split("-")
if len(fld) == 2:
conc = float(fld[1].strip('%')) / 100.0
fld = fld[0]
else:
conc = None
fld = fld[0]
return backend, fld, conc
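# Editorial examples for the fluid-string convention handled above (the
# strings match those used elsewhere in this script):
#
#     splitFluid("REFPROP::Water")  ->  ("REFPROP", "Water", None)
#     splitFluid("HEOS::CO2")       ->  ("HEOS", "CO2", None)
#     splitFluid("LiBr-0%")         ->  (None, "LiBr", 0.0)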
def getInpList(backend):
if backend == "HEOS": return ["DT", "HP"]
elif backend == "REFPROP": return ["DT", "HP"]
elif backend == "INCOMP": return ["PT", "HP"]
else: raise ValueError("Unknown backend.")
def getOutList(inp=None):
if inp == "HP":
return [["Tmax"], ["D"], ["S"], ["T"], ["D", "S", "T"]]
elif inp == "DT":
return [["Tmax"], ["H"], ["P"], ["S"], ["H", "P", "S"]]
elif inp == "PT":
return [["Tmax"], ["H"], ["D"], ["S"], ["H", "D", "S"]]
else:
raise ValueError("Unknown inputs.")
# def getPhaseString(iPhase):
# for i in range(11):
# if getPhaseString(i)==sPhase:
# return i
# if iPhase==1: return "liquid"
# elif iPhase==2: return "supercritical"
# elif iPhase==3: return "supercritical_gas"
# elif iPhase==4: return "supercritical_liquid"
# elif iPhase==5: return "critical_point"
# elif iPhase==6: return "gas"
# elif iPhase==7: return "twophase"
# elif iPhase==8: return "unknown"
# elif iPhase==9: return "not_imposed"
# else: raise ValueError("Couldn't find phase.")
def getPhaseNum(sPhase):
return get_phase_index(sPhase)
# for i in range(11):
# if getPhaseString(i)==sPhase:
# return i
def getOutKey(out): return "".join(out)
def getOutLabel(out): return ",".join(out)
def getTimeKey(inp, out): return "_".join([inp, getOutKey(out)])
def getVectorKey(inp, out): return getTimeKey(inp, out) + "_V"
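# Editorial example of the key naming produced by the helpers above:
#
#     getTimeKey("HP", ["D", "S", "T"])    ->  "HP_DST"
#     getVectorKey("HP", ["D", "S", "T"])  ->  "HP_DST_V"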
def getCriticalProps(propsfluid):
backend, _, _ = splitFluid(propsfluid)
if backend != "INCOMP":
p_crit_m = PropsSI('pcrit', "T", 0, "D", 0, propsfluid) * 0.995
T_crit_m = PropsSI('Tcrit', "T", 0, "D", 0, propsfluid) * 1.005
d_crit_m = PropsSI('rhocrit', "T", 0, "D", 0, propsfluid) * 0.995
h_crit_m = PropsSI('H', "T", T_crit_m, "D", d_crit_m, propsfluid)
        s_crit_m = PropsSI('S', "T", T_crit_m, "D", d_crit_m, propsfluid)
else:
p_crit_m = None
T_crit_m = None
d_crit_m = None
h_crit_m = None
s_crit_m = None
return dict(P=p_crit_m, T=T_crit_m, D=d_crit_m, H=h_crit_m, S=s_crit_m)
def getPTRanges(propsfluid):
backend, _, _ = splitFluid(propsfluid)
    # Setting the limits for temperature and pressure
T_min = PropsSI('Tmin', "T", 0, "D", 0, propsfluid) + 1
T_max = PropsSI('Tmax', "T", 0, "D", 0, propsfluid) - 1
if backend == "REFPROP":
T_min = max(T_min, PropsSI('Ttriple', "T", 0, "D", 0, propsfluid)) + 1
p_min = PropsSI('P', "T", T_min, "Q", 0, propsfluid) + 1
p_max = PropsSI('pmax', "T", 0, "D", 0, propsfluid) - 1
elif backend == "INCOMP":
p_min = 1.5 * 1e5
p_max = 200.0 * 1e5
else:
T_min = max(T_min, PropsSI('Ttriple', "T", 0, "D", 0, propsfluid)) + 1
p_min = PropsSI('ptriple', "T", 0, "D", 0, propsfluid)
p_min = max(p_min, PropsSI('pmin', "T", 0, "D", 0, propsfluid)) + 1
p_max = PropsSI('pmax', "T", 0, "D", 0, propsfluid) - 1
# One more check to debug things:
#p_min = max(p_min,0.01e5)
#T_min = max(T_min,200)
#p_max = min(p_max,200e5)
#T_max = min(T_max,1750)
p_range = np.logspace(np.log10(p_min), np.log10(p_max), xypoints)
T_range = np.linspace(T_min, T_max, xypoints)
return p_range, T_range
#p_max = min(PropsSI('pcrit',"T",0,"D",0,fluid)*20, p_max)
#T_max = min(PropsSI('Tcrit',"T",0,"D",0,fluid)* 3, T_max)
def getLists(propsfluid):
    """Returns randomised lists of all properties within the ranges"""
    backend, _, _ = splitFluid(propsfluid)
p, T = getPTRanges(propsfluid)
p_min = np.min(p)
p_max = np.max(p)
T_min = np.min(T)
T_max = np.max(T)
if backend == "INCOMP":
h_min = PropsSI('H', 'T', T_min, 'P', p_min, propsfluid)
h_max = PropsSI('H', 'T', T_max, 'P', p_max, propsfluid)
else:
critProps = getCriticalProps(propsfluid)
h_min = PropsSI('H', 'T', T_min, 'Q', 0, propsfluid)
h_max = PropsSI('H', 'T', T_min, 'Q', 1, propsfluid)
h_max = max(PropsSI('H', 'T', critProps["T"], 'D', critProps["D"], propsfluid), h_max)
h_max = (h_max - h_min) * 2.0 + h_min
loop = True
count = 0
while loop:
count += 1
h_list = np.random.uniform(h_min, h_max, int(xypoints * 2.0))
p_list = np.random.uniform(np.log10(p_min), np.log10(p_max), int(xypoints * 2.0))
p_list = np.power(10, p_list)
out = ["T", "D", "S"]
res = PropsSI(out, "P", p_list, "H", h_list, propsfluid)
T_list = res[:, 0]
d_list = res[:, 1]
s_list = res[:, 2]
mask = np.isfinite(T_list) & np.isfinite(d_list) & np.isfinite(s_list)
if np.sum(mask) < xypoints:
if False:
print(h_list); print(p_list); print(T_list); print(d_list); print(s_list)
print("There were not enough valid entries in your result vector: {0:d} > {1:d} - rerunning".format(xypoints, np.sum(mask)))
loop = True
else:
loop = False
p_list = p_list[mask][0:xypoints]
h_list = h_list[mask][0:xypoints]
T_list = T_list[mask][0:xypoints]
d_list = d_list[mask][0:xypoints]
s_list = s_list[mask][0:xypoints]
return dict(P=p_list, T=T_list, D=d_list, H=h_list, S=s_list)
maxTries = 4
if count > maxTries:
loop = False
raise ValueError("{0}: Could not fill the lists in {0} runs, aborting.".format(propsfluid, maxTries))
def getInpValues(inp, dataDict):
in1 = inp[0]
in2 = dataDict[in1]
in3 = inp[1]
in4 = dataDict[in3]
return in1, in2, in3, in4
def getStateObj(propsfluid):
backend, fld, conc = splitFluid(propsfluid)
# fluidstr holds the full information and fluid is only the name
# Initialise the state object
if backend is not None:
state = State(backend, fld)
else:
state = State(fld)
# if backend=="INCOMP":
# state.set_mass_fractions([0.0])
if conc is not None:
try:
state.set_mass_fractions([conc])
except:
pass
return state
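# Usage sketch for the state factory above (editorial; assumes CoolProp is
# installed and the HEOS backend is available):
#
#     state = getStateObj("HEOS::Water")
#     state.update(CoolProp.constants.PT_INPUTS, 101325.0, 300.0)
#     rho = state.keyed_output(CoolProp.constants.iDmass)  # density in kg/m3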
def getSpeedMeas(out, in1, in2, in3, in4, propsfluid, vector=False):
pair, out1, _ = generate_update_pair(get_parameter_index(in1), in2[0], get_parameter_index(in3), in4[0])
if out1 == in2[0]: swap = False
else: swap = True
if swap:
input1 = in4
input2 = in2
else:
input1 = in2
input2 = in4
state = getStateObj(propsfluid)
outList = [get_parameter_index(i) for i in out]
resLst = np.empty((repeat,))
timLst = np.empty((repeat,))
if vector:
for j in range(repeat):
timLst.fill(np.inf)
lrange = range(len(input1))
resTmp = np.inf
if len(outList) == 1 and outList[0] == CoolProp.constants.iT_max:
t1 = clock()
for l in lrange:
for o in outList:
resTmp = state.keyed_output(o)
t2 = clock()
timLst[j] = (t2 - t1) * 1e6 / float(len(input1))
else: # We have to update before doing other things
t1 = clock()
for l in lrange:
state.update(pair, input1[l], input2[l])
for o in outList:
resTmp = state.keyed_output(o)
t2 = clock()
timLst[j] = (t2 - t1) * 1e6 / float(len(input1))
res = None
tim = np.min(timLst) # Best of (repeat)
return res, tim
else:
res = np.empty_like(input1)
res.fill(np.inf)
tim = np.empty_like(input1)
tim.fill(np.inf)
for i in range(len(input1)):
resLst.fill(np.inf)
timLst.fill(np.inf)
for j in range(repeat):
lrange = range(loops)
resTmp = np.inf
if len(outList) == 1 and outList[0] == CoolProp.constants.iT_max:
t1 = clock()
for _ in lrange:
for o in outList:
resTmp = state.keyed_output(o)
t2 = clock()
timLst[j] = (t2 - t1) * 1e6 / float(loops)
resLst[j] = resTmp
else: # We have to update before doing other things
inV1 = input1[i]
inV2 = input2[i] # *(1.0+(l/1000.0)*pow(-1,l)) for l in lrange ]
t1 = clock()
for l in lrange:
state.update(pair, inV1, inV2)
for o in outList:
resTmp = state.keyed_output(o)
t2 = clock()
timLst[j] = (t2 - t1) * 1e6 / float(loops)
resLst[j] = resTmp
if not np.all(resLst == resLst[0]):
raise ValueError("Not all results were the same.")
res[i] = resLst[0]
tim[i] = np.min(timLst) # Best of three (repeat)
return res, tim
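# Usage sketch for the timing helper above (editorial; assumes the HEOS
# backend and that the input arrays come from getLists):
#
#     lists = getLists("HEOS::Water")
#     res, tim = getSpeedMeas(["H", "P", "S"], "D", lists["D"],
#                             "T", lists["T"], "HEOS::Water")
#     # tim holds the best-of-`repeat` evaluation time in microseconds per
#     # call; res holds the last requested output at each state point.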
def checkDataSet(propsfluid, dataDict, fill=True, quiet=False):
if not check: return
backend, _, _ = splitFluid(propsfluid)
if not quiet: print("\n\n-- {0:16s} --".format(propsfluid), end="")
# Test for required inputs
newInputs = False
inLists = getLists(propsfluid)
for inp in getInpList(backend):
if not quiet: print("\n{0:2s}: ".format(inp), end="")
for inVal in inp:
if inVal not in dataDict: # A problem
if not fill:
raise ValueError("The input {0:1s} is missing or faulty, cannot continue.".format(inVal))
#dataDict[inVal] = inLists[inVal]
dataDict.update(inLists)
newInputs = True
if not quiet: print("{0:s}*({1:d}),".format(inVal, len(dataDict[inVal])), end="")
else:
if not quiet: print("{0:s} ({1:d}),".format(inVal, len(dataDict[inVal])), end="")
# All inputs are there
in1, in2, in3, in4 = getInpValues(inp, dataDict)
#in2 = in2[:3]
#in4 = in4[:3]
if in2.shape != in4.shape:
raise ValueError("The stored data for {0:s} and {1:s} do not have the same shape.".format(in1, in3))
if in2.shape != inLists[in1].shape:
raise ValueError("The stored data for {0:s} and its list do not have the same shape {1} vs {2}.".format(in1, in2.shape, inLists[in1].shape))
# Check for time data
for out in getOutList(inp):
key = getTimeKey(inp, out)
okey = getOutKey(out)
if key not in dataDict or newInputs or not np.all(np.isfinite(dataDict[key])):
if not fill:
raise ValueError("The time data for {0:s} is missing or faulty, cannot continue.".format(key))
res, tim = getSpeedMeas(out, in1, in2, in3, in4, propsfluid)
dataDict[key] = tim
dataDict[okey] = res # We calculated in, why not use it here...
if not quiet: print("{0:s}*({1:d}),".format(key, len(dataDict[key])), end="")
else:
if not quiet: print("{0:s} ({1:d}),".format(key, len(dataDict[key])), end="")
if dataDict[key].shape != in2.shape or not np.all(np.isfinite(dataDict[key])):
raise ValueError("The stored time data for {0:s} does not have the same shape as the inputs.".format(key))
# Check for vectors
for out in getOutList(inp):
key = getVectorKey(inp, out)
if key not in dataDict or not np.all(np.isfinite(dataDict[key])):
if not fill:
raise ValueError("The fluid data for {0:s} is missing or faulty, cannot continue.".format(key))
res, tim = getSpeedMeas(out, in1, in2, in3, in4, propsfluid, vector=True)
dataDict[key] = tim
if not quiet: print("{0:s}*({1:d}),".format(key, dataDict[key].size), end="")
else:
if not quiet: print("{0:s} ({1:d}),".format(key, dataDict[key].size), end="")
if dataDict[key].size != 1 or not np.all(np.isfinite(dataDict[key])):
raise ValueError("The vector data for {0:s} does not have the correct size {1}..".format(key, dataDict[key].size))
# inp = getInpList(backend)[0] # Explicit calls
# # Check for properties
# for out in getOutList(inp)[:-1]:
# key = getOutKey(out)
# if key not in dataDict or not np.all(np.isfinite(dataDict[key])):
# if not fill:
# raise ValueError("The fluid data for {0:s} is missing or faulty, cannot continue.".format(key))
# res = PropsSI(out,in1,in2,in3,in4,propsfluid)
# dataDict[key] = res
# if not quiet: print("{0:s}*({1:d}),".format(key,len(dataDict[key])),end="")
# else:
# if not quiet: print("{0:s} ({1:d}),".format(key,len(dataDict[key])),end="")
# if dataDict[key].shape != in2.shape or not np.all(np.isfinite(dataDict[key])):
# raise ValueError("The stored data for {0:s} does not have the same shape as the inputs {1} vs {2}..".format(key,dataDict[key].shape,in2.shape))
# # Check for phase
# for out in ["Phase"]:
# if backend!="HEOS":
# dataDict[key] = np.zeros_like(a, dtype, order, subok)
# key = getOutKey(out)
# if key not in dataDict or newInputs or not np.all(np.isfinite(dataDict[key])):
# if not fill:
# raise ValueError("The phase data for {0:s} is missing or faulty, cannot continue.".format(key))
# res = np.empty_like(in2)
# res.fill(np.inf)
# for i in range(len(in2)):
# res[i] = PropsSI(out,in1,in2[i],in3,in4[i],propsfluid)
# dataDict[key] = res
# if not quiet: print("{0:s}*({1:d}),".format(key,len(dataDict[key])),end="")
# else:
# if not quiet: print("{0:s} ({1:d}),".format(key,len(dataDict[key])),end="")
# if dataDict[key].shape != in2.shape or not np.all(np.isfinite(dataDict[key])):
# raise ValueError("The stored data for {0:s} does not have the same shape as the inputs {1} vs {2}..".format(key,dataDict[key].shape,in2.shape))
#
# # Now we use the vector data
# key = getVectorKey(inp, out)
# if key not in dataDict or not np.all(np.isfinite(dataDict[key])):
# if not fill:
# raise ValueError("The vector data for {0:s} is missing or faulty, cannot continue.".format(key))
# dataDict[key] = np.empty_like(in2)
# dataDict[key].fill(np.inf)
# res = []
# for _ in range(repeat):
# t1=clock()
# PropsSI(out,in1,in2,in3,in4,propsfluid)
# t2=clock()
# res.append((t2-t1)/float(len(in2)))
# dataDict[key] = np.min(res)
# if not quiet: print("{0:s}*({1}),".format(key,dataDict[key]),end="")
# else:
# if not quiet: print("{0:s} ({1}),".format(key,dataDict[key]),end="")
# try:
# float(dataDict[key])
# except:
# raise ValueError("The stored vector data for {0:s} cannot be casted to float.".format(key))
# if not quiet: print("")
# All data is loaded and checked, we can calculate more now
def getEntryCount(dicts, backend, fld):
    return len(dicts[fld][backend])
def getUKey(fld, bck, inp, out):
return "-".join([fld, bck, inp, "".join(out)])
def getData(fld, backend, inp, out, fluidData):
inputs1 = []
inputs2 = []
values = []
times = []
i1key = inp[0]
i2key = inp[1]
vkey = getOutKey(out)
tkey = getTimeKey(inp, out)
for dkey in fluidData[fld][backend]:
cData = fluidData[fld][backend][dkey]
inputs1.append(cData[i1key])
inputs2.append(cData[i2key])
values.append(cData[vkey])
times.append(cData[tkey])
ret = {}
if len(inputs1) > 0:
ret[i1key] = np.concatenate(inputs1)
ret[i2key] = np.concatenate(inputs2)
ret[vkey] = np.concatenate(values)
ret[tkey] = np.concatenate(times)
return ret
def getSingleData(fld, backend, key, fluidData):
#print("Getting: "+fld+", "+backend+", "+key)
values = []
for dkey in fluidData[fld][backend]:
if key in fluidData[fld][backend][dkey]:
if "P" in fluidData[fld][backend][dkey]:
# TODO: Fix this, do we need the mask?
#mask = fluidData[fld][backend][dkey]["P"]>0.3e5
mask = fluidData[fld][backend][dkey]["P"] > 0.0e5
try:
values.append(fluidData[fld][backend][dkey][key][mask])
except Exception as e:
values.append(fluidData[fld][backend][dkey][key])
print(e)
pass
else:
values.append(fluidData[fld][backend][dkey][key])
if len(values) > 0:
if np.size(values[0]) > 1:
return np.concatenate(values)
else:
return np.array(values)
return None
def fillDict(fld, backend, fluidData, curDict, curKeys):
if curDict is None: curDict = {}
for key in curKeys:
vals = getSingleData(fld, backend, key, fluidData)
if vals is not None: curDict[key] = vals
return curDict
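# Usage sketch (editorial): pull all stored timing samples for one key, e.g.
#
#     key = getTimeKey("HP", ["D", "S", "T"])
#     times = getSingleData("Water", "HEOS", key, fluidData)
#     # `times` is the concatenation over all stored runs, or None when the
#     # key has never been measured for this fluid/backend.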
################################################
fluidData = {}
for fluidstr in fluids:
_, fld, _ = splitFluid(fluidstr)
if fld not in fluidData: fluidData[fld] = {}
for backend in backends:
if backend not in fluidData[fld]: # Try to add it
propsfluid = "::".join([backend, fluidstr])
try:
PropsSI('Tmax', "T", 0, "D", 0, propsfluid)
fluidData[fld][backend] = loadNpzData(backend, fld)
except:
pass
if backend in fluidData[fld]:
for k in fluidData[fld][backend]:
checkDataSet(propsfluid, fluidData[fld][backend][k], fill=False, quiet=True)
else: # Already added backend for fluid
pass
lastSave = 0
while runs < maxruns and calc:
check = True # force checking for new records
runs += 1
# Now we have the data structure with the precalculated data
for fluidstr in fluids:
_, fld, _ = splitFluid(fluidstr)
for backend in fluidData[fld]:
propsfluid = "::".join([backend, fluidstr])
dicts = fluidData[fld][backend]
keys = list(dicts.keys())
keys.sort()
while len(keys) < runs:
if len(keys) < 1: newKey = 0
else: newKey = int(keys[-1]) + 1
if newKey > 999:
raise ValueError("Too many dicts: {0}>999".format(newKey))
k = str(newKey).zfill(3)
dicts[k] = {}
dicts[k]['name'] = k
try:
checkDataSet(propsfluid, dicts[k], fill=True, quiet=False)
except Exception as e:
print("There was an error, dataset {0} from {1}.might be faulty:\n{2}".format(k, propsfluid, str(e)))
pass
todel = []
# for k in dicts:
try:
checkDataSet(propsfluid, dicts[k], fill=False, quiet=True)
except Exception as e:
print("There was an error, removing dataset {0} from {1}.\n{2}".format(k, propsfluid, str(e)))
todel.append(k)
for t in todel: del dicts[t]
keys = list(dicts.keys())
keys.sort()
# Updated all dicts for this backend, saving data
if runs >= maxruns or (lastSave + 4) < runs:
print("\n\nDone processing fluids, saving data: ")
for fld in fluidData:
for backend in fluidData[fld]:
saveNpzData(backend, fld, fluidData[fld][backend], start=lastSave, stop=runs)
print("{0} ({1})".format(backend + "::" + fld, len(fluidData[fld][backend].keys()[lastSave:runs])), end=", ")
print("")
lastSave = runs
if not plot: sys.exit(0)
# exclLst = [["Tmax"],["H"],["D"],["S"],["H","D","S"],["D"],["S"],["T"],["D","S","T"]]
#
# Start with a temporary dictionary that holds all the data we need
# for fld in fluidData:
# cstData = {} # Data from calls to constants (overhead)
# expData = {} # Data from explicit EOS calls
# impData = {} # Data from EOS calls that require iterations
# for backend in fluidData[fld]:
#
# curKeys = []
# for inp in getInpList(backend):
# for out in getOutList(inp)[:1]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# cstData[backend] = curDict
#
# curKeys = []
# for inp in getInpList(backend)[:1]:
# for out in getOutList(inp)[1:]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# expData[backend] = curDict
#
# curKeys = []
# for inp in getInpList(backend)[1:]:
# for out in getOutList(inp)[1:]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# impData[backend] = curDict
# curDict[backend] = {}
# for key in :
# vals = getSingleData(fld, backend, key, fluidData)
# if vals is not None: curDict[backend][key] = vals
# if curDict
#
# cstData[backend] = {}
# for out in getOutList(inp[0])[0]:
# res = getData(fld,backend,inp[0],out,fluidData)
# cstData[backend].update(res)
# for out in getOutList(inp[1])[0]:
# res = getData(fld,backend,inp[1],out,fluidData)
# cstData[backend].update
#
# expData[backend] = {}
# for out in getOutList(inp[0])[1:]:
# res = getData(fld,backend,inp[0],out,fluidData)
# expData[backend].update(res)
#
# impData[backend] = {}
# for out in getOutList(inp[1])[1:]:
# res = getData(fld,backend,inp[1],out,fluidData)
# impData[backend].update(res)
#############################################################
# All data is available in the dicts now.
#############################################################
# The first thing to do is to print some statistical
# measures to give you an idea about the data.
# try:
# #dataOHCP = [cstData["HEOS"]["DT_Tmax"] , cstData["HEOS"]["HP_Tmax"] ]
# #dataOHRP = [cstData["REFPROP"]["DT_Tmax"], cstData["REFPROP"]["HP_Tmax"]]
# print("\n{0} - {1} points ".format(fld,np.size(cstData["HEOS"]["DT_Tmax"])))
# print("Overhead CoolProp: {0:5.3f} us".format(np.mean(cstData["HEOS"]["DT_Tmax"])))#, np.mean(cstData["HEOS"]["HP_Tmax"]))
# print("Overhead REFPROP : {0:5.3f} us".format(np.mean(cstData["REFPROP"]["DT_Tmax"])))#,np.mean(cstData["REFPROP"]["HP_Tmax"]))
# print("Mean EOS in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.mean(expData["HEOS"]["DT_HPS"]),np.mean(impData["HEOS"]["HP_DST"])))
# print("Std. dev. in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.std(expData["HEOS"]["DT_HPS"]) ,np.std(impData["HEOS"]["HP_DST"])))
# print("Minimum in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.min(expData["HEOS"]["DT_HPS"]) ,np.min(impData["HEOS"]["HP_DST"])))
# print("Maximum in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.max(expData["HEOS"]["DT_HPS"]) ,np.max(impData["HEOS"]["HP_DST"])))
# print("Mean EOS in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.mean(expData["REFPROP"]["DT_HPS"]),np.mean(impData["REFPROP"]["HP_DST"])))
# print("Std. dev. in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.std(expData["REFPROP"]["DT_HPS"]) ,np.std(impData["REFPROP"]["HP_DST"])))
# print("Minimum in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.min(expData["REFPROP"]["DT_HPS"]) ,np.min(impData["REFPROP"]["HP_DST"])))
# print("Maximum in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.max(expData["REFPROP"]["DT_HPS"]) ,np.max(impData["REFPROP"]["HP_DST"])))
# print("")
#
# print("\n{0} - {1} points ".format(fld,np.size(cstData["HEOS"]["DT_Tmax_V"])))
# print("Overhead CoolProp: {0:5.3f} us".format(np.mean(cstData["HEOS"]["DT_Tmax_V"])))#, np.mean(cstData["HEOS"]["HP_Tmax"]))
# print("Overhead REFPROP : {0:5.3f} us".format(np.mean(cstData["REFPROP"]["DT_Tmax_V"])))#,np.mean(cstData["REFPROP"]["HP_Tmax"]))
# print("Mean EOS in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.mean(expData["HEOS"]["DT_HPS_V"]),np.mean(impData["HEOS"]["HP_DST_V"])))
# print("Std. dev. in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.std(expData["HEOS"]["DT_HPS_V"]) ,np.std(impData["HEOS"]["HP_DST_V"])))
# print("Minimum in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.min(expData["HEOS"]["DT_HPS_V"]) ,np.min(impData["HEOS"]["HP_DST_V"])))
# print("Maximum in CoolProp: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.max(expData["HEOS"]["DT_HPS_V"]) ,np.max(impData["HEOS"]["HP_DST_V"])))
# print("Mean EOS in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.mean(expData["REFPROP"]["DT_HPS_V"]),np.mean(impData["REFPROP"]["HP_DST_V"])))
# print("Std. dev. in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.std(expData["REFPROP"]["DT_HPS_V"]) ,np.std(impData["REFPROP"]["HP_DST_V"])))
# print("Minimum in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.min(expData["REFPROP"]["DT_HPS_V"]) ,np.min(impData["REFPROP"]["HP_DST_V"])))
# print("Maximum in REFPROP: f(rho,T): {0:9.3f} us, f(h,p): {1:9.3f} us".format(np.max(expData["REFPROP"]["DT_HPS_V"]) ,np.max(impData["REFPROP"]["HP_DST_V"])))
# print("")
#
# except Exception as e:
# print(str(e.message))
# pass
#
# try:
# fld = 'Water'
# cstData = {} # Data from calls to constants (overhead)
# expData = {} # Data from explicit EOS calls
# impData = {} # Data from EOS calls that require iterations
# for backend in fluidData[fld]:
# curKeys = []
# for inp in getInpList(backend):
# for out in getOutList(inp)[:1]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# cstData[backend] = curDict
#
# curKeys = []
# for inp in getInpList(backend)[:1]:
# for out in getOutList(inp)[1:]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# expData[backend] = curDict
#
# curKeys = []
# for inp in getInpList(backend)[1:]:
# for out in getOutList(inp)[1:]:
# curKeys.append(getTimeKey( inp, out))
# curKeys.append(getVectorKey(inp, out))
# curDict = {}
# fillDict(fld,backend,fluidData,curDict,curKeys)
# impData[backend] = curDict
#
#
# print("Done")
def autolabel(ax, rects, means, stds, lens=0, fac=1):
return
# attach some text labels
yerr = (stds * 100.0) / means
ypos = np.max(means) # + np.max(stds)
for i in range(len(rects)):
xpos = rects[i].get_x() + rects[i].get_width() / 2.
#ax.text(xpos, 1.05*ypos, '{0:s}{1:4.2f}{2:s}{3:3.1f}{4:s}'.format(r'',means[i]*fac,r'us +- ',yerr[i],r'%'), rotation=90, ha='center', va='bottom', fontsize='smaller')
ax.text(xpos, 1.05 * ypos, '{0:s}{1:4.2f}{2:s}{3:3.1f}{4:s}'.format(r'\SI{', means[i] * fac, r'}{\us} (\SI{\pm', yerr[i], r'}{\percent})'), rotation=90, ha='center', va='bottom', fontsize='smaller')
#ax.text(xpos, 0.25*ypos, str(lens[i]), rotation=90, ha='center', va='bottom', fontsize='smaller')
def axislabel(txt, ax, xPos, yPos=-1):
ax.text(xPos, yPos, txt, rotation=45, ha='center', va='top', fontsize='xx-small')
#############################################################
# All data is available in the dicts now.
#############################################################
# The first plot contains the time data, this is averaged and
# plotted as a bar graph with the standard deviation.
hatchLst = ["", "///", "\\\\\\"]
backendsLst = ["INCOMP", "HEOS", "REFPROP"]
for fluidstr in fluids[:-1]:
_, fld, _ = splitFluid(fluidstr)
outCst = []
labCst = []
outExp = []
labExp = []
outImp = []
labImp = []
DEBUG = True
for backend in backendsLst:
# Backend exists in fluid data?
try:
for i, inp in enumerate(getInpList(backend)):
for j, out in enumerate(getOutList(inp)):
if j == 0: # First output is Tmax
if backend not in fluidData[fld]:
outCst.append([0])
labCst.append("Dummy")
if DEBUG: print("Added a dummy for {0} and {1},{2},{3}".format(fld, backend, inp, out))
else:
outCst.append(getSingleData(fld, backend, getTimeKey(inp, out), fluidData))
labCst.append(getTimeKey(inp, out))
continue
elif i == 0: # First input is explicit
if backend not in fluidData[fld]:
outExp.append([0])
labExp.append("Dummy")
if DEBUG: print("Added a dummy for {0} and {1},{2},{3}".format(fld, backend, inp, out))
else:
outExp.append(getSingleData(fld, backend, getTimeKey(inp, out), fluidData))
labExp.append(getTimeKey(inp, out))
continue
elif i == 1:
if backend not in fluidData[fld]:
outImp.append([0])
labImp.append("Dummy")
if DEBUG: print("Added a dummy for {0} and {1},{2},{3}".format(fld, backend, inp, out))
else:
outImp.append(getSingleData(fld, backend, getTimeKey(inp, out), fluidData))
labImp.append(getTimeKey(inp, out))
continue
else:
raise ValueError("Wrong number.")
except Exception as e:
print(e)
sys.exit(1)
# Do the plotting
if bp is not None:
bp.figure = None
fg = bp.getFigure()
ccycle = bp.getColorCycle(length=3)
else:
fg = plt.figure()
ccycle = cycle(["b", "g", "r"])
#fg = plt.figure()
ax1 = fg.add_subplot(111)
ax2 = ax1.twinx()
rects1 = []
labels1 = []
rects2 = []
labels2 = []
rects3 = []
labels3 = []
    col1 = next(ccycle)
    col2 = next(ccycle)
    col3 = next(ccycle)
numBackends = len(backendsLst)
step = 1
width = step / (numBackends + 1) # the width of the bars
offset = -0.5 * numBackends * width
entries = int((len(outCst) + len(outExp) + len(outImp)) / numBackends)
# for o in range(entries):
# ids = np.empty((numBackends,))
# for b in range(numBackends):
# i = o*numBackends+b
# ids[b] = offset + o*step + b*width
# j = i - 0
# if j < len(outCst):
# rects1.extend(ax1.bar(ids[b], np.mean(outCst[j]), width, color=col1, hatch=hatchLst[b]))#, yerr=np.std(curList[i]), ecolor='k'))
#
#
# j = i - 0
# if j < len(outCst):
# rects1.extend(ax1.bar(ids[b], np.mean(outCst[j]), width, color=col1, hatch=hatchLst[b]))#, yerr=np.std(curList[i]), ecolor='k'))
# else:
# j = i-len(outCst)
# if j < len(outExp):
# rects2.extend(ax1.bar(ids[b], np.mean(outExp[j]), width, color=col2, hatch=hatchLst[b]))#, yerr=np.std(curList[i]), ecolor='k'))
# else:
# j = i-len(outCst)-len(outExp)
# if j < len(outImp):
# rects3.extend(ax2.bar(ids[b], np.mean(outImp[j]), width, color=col3, hatch=hatchLst[b]))#, yerr=np.std(curList[i]), ecolor='k'))
# else:
# raise ValueError("Do not go here!")
DEBUG = True
entries = 2
for o in range(entries):
ids = np.empty((numBackends,))
for b in range(numBackends):
i = b * entries + o
try:
ids[b] = offset + o * step + b * width
rects1.extend(ax1.bar(ids[b], np.mean(outCst[i]), width, color=col1, hatch=hatchLst[b], rasterized=False)) # , yerr=np.std(curList[i]), ecolor='k'))
if DEBUG:
print("Plotting {0}: {1:7.1f} - {2} - {3}".format(fld, np.mean(outCst[i]), "cst.", b))
# print(ids[b],labCst[i])
#axislabel(labCst[i], ax1, ids[b]+0.5*width)
except:
pass
offset += entries * step
#entries = int(len(outExp)/numBackends)
entries = 4
for o in range(entries):
ids = np.empty((numBackends,))
for b in range(numBackends):
i = b * entries + o
try:
ids[b] = offset + o * step + b * width
rects2.extend(ax1.bar(ids[b], np.mean(outExp[i]), width, color=col2, hatch=hatchLst[b], rasterized=False)) # , yerr=np.std(curList[i]), ecolor='k'))
if DEBUG:
print("Plotting {0}: {1:7.1f} - {2} - {3}".format(fld, np.mean(outExp[i]), "exp.", b))
# print(ids[b],labExp[i])
#axislabel(labExp[i], ax1, ids[b]+0.5*width)
except:
pass
x_newaxis = np.max(ids) + 1.5 * width
plt.axvline(x_newaxis, color='k', linestyle='dashed')
offset += entries * step
entries = 4
for o in range(entries):
ids = np.empty((numBackends,))
for b in range(numBackends):
i = b * entries + o
try:
ids[b] = offset + o * step + b * width
rects3.extend(ax2.bar(ids[b], np.mean(outImp[i]), width, color=col3, hatch=hatchLst[b], rasterized=False)) # , yerr=np.std(curList[i]), ecolor='k'))
if DEBUG:
print("Plotting {0}: {1:7.1f} - {2} - {3}".format(fld, np.mean(outImp[i]), "imp.", b))
# print(ids[b],labImp[i])
#axislabel(labImp[i], ax1, ids[b]+0.5*width)
except:
pass
# ax1.set_xlim([ids.min()-2.5*width,ids.max()+2.5*width])
ax1.spines['top'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_ticks_position('bottom')
labels = [r"ex.", r"im.", r"$h$", r"$\rho|p$", r"$s$", r"all", r"$\rho$", r"$s$", r"$T$", r"all"]
ax1.set_xticks(range(len(labels)))
ax1.set_xticklabels(labels)
# ax1.yaxis.get_label().set_verticalalignment("baseline")
x_min = rects1[0].get_x()
dx = rects1[0].get_width()
x_max = rects3[-1].get_x()
x_min = x_min - 1 * dx
x_max = x_max + 2 * dx
ax1.set_xlim([x_min, x_max])
y_min = 0
y_max_c = np.nanmax([a.get_height() for a in rects1])
y_max_e = np.nanmax([a.get_height() for a in rects2])
y_max_i = np.nanmax([a.get_height() for a in rects3])
y_max = np.max([y_max_c, y_max_e, y_max_i / 10.0])
y_max = np.ceil(1.3 * y_max / 10.0) * 10.0
ax1.set_ylim([y_min, y_max])
ax2.set_ylim([y_min, y_max * 10.0])
ratio = 10.0 / 4.0 * y_max / 250.0 # height of 10 for 4 points if y_max==250
x_min = rects1[0].get_x()
x_max = rects1[-1].get_x() + dx
x, y = range_brace(x_min, x_max)
dy = np.ceil(y_max_c / 10.0) * 10.0
y = dy + y * ratio * (x[-1] - x[0])
ax1.plot(x, y, ls='-', color='k')
ax1.text(np.mean(x), np.max(y), "const.", rotation=0, ha='center', va='bottom', fontsize='medium')
x_min = rects2[0].get_x()
x_max = rects2[-1].get_x() + dx
x, y = range_brace(x_min, x_max)
dy = np.ceil(y_max_e / 10.0) * 10.0
y = dy + y * ratio * (x[-1] - x[0])
ax1.plot(x, y, ls='-', color='k')
ax1.text(np.mean(x), np.max(y), "explicit", rotation=0, ha='center', va='bottom', fontsize='medium')
x_min = rects3[0].get_x()
x_max = rects3[-1].get_x() + dx
x, y = range_brace(x_min, x_max)
dy = np.ceil(y_max_i / 100.0) * 10
y = dy + y * ratio * (x[-1] - x[0])
ax1.plot(x, y, ls='-', color='k')
ax1.text(np.mean(x), np.max(y), "implicit", rotation=0, ha='center', va='bottom', fontsize='medium')
#ax1.text(x_newaxis*0.9, y_max*0.9, "<- left axis", rotation=0, ha='right', va='bottom', fontsize='medium')
#ax1.text(x_newaxis*1.1, y_max*0.9, "right axis ->", rotation=0, ha='left', va='bottom', fontsize='medium')
handles = []
for h in (rects1[0], rects2[1], rects3[2]):
handles.append(copy.copy(h))
handles[-1].set_facecolor('white')
handles.append(copy.copy(h))
handles[-1].set_hatch('')
labels = (r'$p,T$-fit', r'constant', r'CoolProp', r'explicit, $f(p|\rho,T)$', r'REFPROP', r'implicit, $f(h,p)$')
if bp is not None:
bp.drawLegend(ax=ax1,
loc='lower center',
bbox_to_anchor=(0.5, 1.05),
ncol=3,
handles=handles,
labels=labels)
else:
ax1.legend(handles, labels,
loc='lower center',
bbox_to_anchor=(0.5, 1.05),
ncol=3)
ax1.set_ylabel(r'Time per explicit call (us)')
ax2.set_ylabel(r'Time per implicit call (us)')
fg.savefig(path.join(getFigureFolder(), "TimeComp-" + fld.lower() + ".pdf"))
if bp is not None:
ax1.set_ylabel(r'Time per explicit call (\si{\us})')
ax2.set_ylabel(r'Time per implicit call (\si{\us})')
mpl.rcParams['text.usetex'] = True
fg.savefig(path.join(getFigureFolder(), "TimeComp-" + fld.lower() + "-tex.pdf"))
mpl.rcParams['text.usetex'] = False
# Fix the wrong baseline
for tick in ax1.get_xaxis().get_major_ticks():
tick.set_pad(2 * tick.get_pad())
tick.label1 = tick._get_text1()
for lab in ax1.xaxis.get_ticklabels():
lab.set_verticalalignment("baseline")
# lab.set_pad(1.5*lab.get_pad())
# ax1.set_xticklabels(labels)
#
# for tick in ax1.xaxis.get_major_ticks():
# tick.label1.set_horizontalalignment('center')
bp.savepgf(path.join(getFigureFolder(), "TimeComp-" + fld.lower() + ".pgf"), fg, repList)
plt.close()
# for fld in fluids:
# try:
# if bp is not None:
# bp.figure = None
# fg = bp.getFigure()
# ccycle = bp.getColorCycle(length=3)
# else:
# fg = plt.figure()
# ccycle = cycle(["b","g","r"])
#
# #fg = plt.figure()
# ax1 = fg.add_subplot(111)
# ax2 = ax1.twinx()
#
# if "INCOMP" in fluidData[fld]:
# el = 3
# hatch = ["","//","x"]
# else:
# el = 2
# hatch = ["//","x"]
#
# #one standard deviation above and below the mean of the data
# width = 0.25 # the width of the bars
# step = 1
# offset = -step-0.5*el*width
#
# lab = []
# rects1 = []
# rects2 = []
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "PT_Tmax", fluidData)*1e2)
# curList.append(getSingleData(fld, "HEOS" , "DT_Tmax", fluidData)*1e2)
# curList.append(getSingleData(fld, "REFPROP", "DT_Tmax", fluidData)*1e2)
#
# lab.extend(["Tmax"])
#
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# curCol = ccycle.next()
# for i in range(len(hatch)):
# rects1.extend(ax1.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax1,rects1[0:],np.mean(curList,axis=1),np.std(curList,axis=1),fac=1e-2)
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "PT_H", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "DT_H", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "DT_H", fluidData))
# lab.extend(["H"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# curCol = ccycle.next()
# for i in range(len(hatch)):
# rects1.extend(ax1.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax1,rects1[el:],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "PT_D", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "DT_P", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "DT_P", fluidData))
# if el==3: lab.extend(["D/P"])
# else: lab.extend(["P"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "g"
# for i in range(len(hatch)):
# rects1.extend(ax1.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax1,rects1[int(2*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "PT_S", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "DT_S", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "DT_S", fluidData))
# lab.extend(["S"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "g"
# for i in range(len(hatch)):
# rects1.extend(ax1.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax1,rects1[int(3*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "PT_HDS", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "DT_HPS", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "DT_HPS", fluidData))
# lab.extend(["all"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "g"
# for i in range(len(hatch)):
# rects1.extend(ax1.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax1,rects1[int(4*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "HP_D", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "HP_D", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "HP_D", fluidData))
# lab.extend(["D"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# curCol = ccycle.next()
# for i in range(len(hatch)):
# rects2.extend(ax2.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax2,rects2[0:],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "HP_T", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "HP_T", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "HP_T", fluidData))
# lab.extend(["T"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "r"
# for i in range(len(hatch)):
# rects2.extend(ax2.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax2,rects2[int(1*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "HP_S", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "HP_S", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "HP_S", fluidData))
# lab.extend(["S"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "r"
# for i in range(len(hatch)):
# rects2.extend(ax2.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax2,rects2[int(2*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
# curList = []
# if el==3: curList.append(getSingleData(fld, "INCOMP" , "HP_DST", fluidData))
# curList.append(getSingleData(fld, "HEOS" , "HP_DST", fluidData))
# curList.append(getSingleData(fld, "REFPROP", "HP_DST", fluidData))
# lab.extend(["all"])
# offset += step
# curN = len(curList)
# curInd = [ offset + i * width for i in range(curN)]
# #curCol = "r"
# for i in range(len(hatch)):
# rects2.extend(ax2.bar(curInd[i], np.mean(curList[i]), width, color=curCol, hatch=hatch[i]))#, yerr=np.std(curList[i]), ecolor='k'))
# autolabel(ax2,rects2[int(3*el):],np.mean(curList,axis=1),np.std(curList,axis=1))
#
#
# ids = np.arange(len(lab))
# # add some text for labels, title and axes ticks
# #if backend=="INCOMP": ax1.set_ylabel(r'Time per $f(p,T)$ call (\si{\us})')
# if el==3: ax1.set_ylabel(r'Time per \texttt{Tmax} call (\SI{0.01}{\us}) and'+"\n"+r'per $f(p,T)$ and $f(\rho,T)$ call (\si{\us})')
# else: ax1.set_ylabel(r'Time per \texttt{Tmax} call (\SI{0.01}{\us})'+"\n"+r'and per $f(\rho,T)$ call (\si{\us})')
# ax2.set_ylabel(r'Time per $f(h,p)$ call (\si{\us})')
#
# ax1.set_xticks(ids)
# ax1.set_xticklabels([r"\texttt{"+i+r"}" for i in lab], rotation=0)
# ax1.set_xlim([ids.min()-2.5*width,ids.max()+2.5*width])
#
# ax1.spines['top'].set_visible(False)
# ax2.spines['top'].set_visible(False)
# ax1.xaxis.set_ticks_position('bottom')
# ax2.xaxis.set_ticks_position('bottom')
#
# handles = []
# if el==3:
# for h in (rects1[0], rects1[4], rects2[2]):
# handles.append(copy.copy(h))
# handles[-1].set_facecolor('white')
# handles.append(copy.copy(h))
# handles[-1].set_hatch('')
# labels = (r'$p,T$-fit', r'\texttt{Tmax}', r'CoolProp', r'explicit, $f(p|\rho,T)$', r'REFPROP', r'implicit, $f(h,p)$')
# else:
# for h in (rects1[0], rects1[2], rects2[1]):
# handles.append(copy.copy(h))
# handles[-1].set_facecolor('white')
# handles.append(copy.copy(h))
# handles[-1].set_hatch('')
# labels = (r'', r'\texttt{Tmax}', r'CoolProp', r'explicit, $f(\rho,T)$', r'REFPROP', r'implicit, $f(h,p)$')
# handles[0] = mpatches.Patch(visible=False)
#
# if bp is not None:
# bp.drawLegend(ax=ax1,
# loc='upper center',
# bbox_to_anchor=(0.5, 1.4),
# ncol=3,
# handles=handles,
# labels=labels)
# else:
# ax1.legend(handles,labels,
# loc='upper center',
# bbox_to_anchor=(0.5, 1.4),
# ncol=3)
#
# fg.savefig(path.join(getFigureFolder(),"TimeComp-"+fld+".pdf"))
# if bp is not None: bp.savepgf(path.join(getFigureFolder(),"TimeComp-"+fld+".pgf"),fg,repList)
# plt.close()
#
# except Exception as e:
# print(e)
# pass
#############################################################
# The second figure compares the backend for the full calculation
#############################################################
backendExp = []
backendImp = []
fluidLabel = []
hatchLst = ["///", "\\\\\\"]
backendsLst = ["HEOS", "REFPROP"]
for fluidstr in fluids[:-1]:
_, fld, _ = splitFluid(fluidstr)
#if fld=="CO2": fluidLabel.append("\ce{CO2}")
# else:
fluidLabel.append(fld)
for backend in backendsLst:
if backend not in fluidData[fld]:
backendExp.append([0])
backendImp.append([0])
continue
# Backend exists in fluid data
try:
inp = getInpList(backend)
outExp = getTimeKey(inp[0], getOutList(inp[0])[-1])
outImp = getTimeKey(inp[1], getOutList(inp[1])[-1])
backendExp.append(getSingleData(fld, backend, outExp, fluidData))
backendImp.append(getSingleData(fld, backend, outImp, fluidData))
except Exception as e:
backendExp.append([0])
backendImp.append([0])
print(e)
pass
# Data is prepared, we can plot now.
if bp is not None:
bp.figure = None
fg1 = bp.getFigure()
bp2 = BasePlotter()
fg2 = bp2.getFigure()
ccycle = bp.getColorCycle(length=3)
else:
fg1 = plt.figure()
fg2 = plt.figure()
ccycle = cycle(["b", "g", "r"])
fg1.set_size_inches((fg1.get_size_inches()[0] * 1, fg1.get_size_inches()[1] * 0.75))
fg2.set_size_inches((fg2.get_size_inches()[0] * 1, fg2.get_size_inches()[1] * 0.75))
ccycle.next() # No incomp
#
#ax1 = fg.add_subplot(111)
#ax2 = ax1.twinx()
ax1 = fg1.add_subplot(111)
ax2 = fg2.add_subplot(111)
#entries = int(len(backendExp)/len(fluidLabel))
# one standard deviation above and below the mean of the data
rects1 = []
rects2 = []
col1 = ccycle.next()
col2 = ccycle.next()
numFluids = len(fluidLabel)
numBackends = len(backendsLst)
step = 1
width = step / (numBackends + 1) # the width of the bars
offset = -0.5 * numBackends * width
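# One bar per backend for every fluid; 'offset' shifts each group left by half
# of its total width so the backend bars are grouped around the fluid's x position.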
for f in range(numFluids):
ids = np.empty((numBackends,))
for b in range(numBackends):
i = f * numBackends + b
ids[b] = offset + f * step + b * width
rects1.extend(ax1.bar(ids[b], np.mean(backendExp[i]), width, color=col1, hatch=hatchLst[b], rasterized=False)) # , yerr=np.std(curList[i]), ecolor='k'))
rects2.extend(ax2.bar(ids[b], np.mean(backendImp[i]), width, color=col2, hatch=hatchLst[b], rasterized=False)) # , yerr=np.std(curList[i]), ecolor='k'))
y_max = np.max(np.concatenate((np.ravel(ax1.get_ylim()), np.ravel(ax2.get_ylim()) / 10.0)))
ax1.set_ylim([0, y_max])
ax2.set_ylim([0, y_max * 10.0])
for ax in [ax1, ax2]:
ax.set_xticks(range(numFluids))
ax.set_xticklabels(fluidLabel, rotation=25)
ax.set_xlim([0.0 - 0.5 * step, numFluids - 1 + 0.5 * step])
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
if ax == ax1: rects = rects1
elif ax == ax2: rects = rects2
handles = (rects[0], rects[1])
labels = (r'CoolProp', r'REFPROP')
anchor = (0.5, 1.2)
if bp is not None:
bp.drawLegend(ax=ax,
loc='upper center',
bbox_to_anchor=anchor,
ncol=numBackends,
handles=handles,
labels=labels)
else:
ax.legend(handles, labels,
loc='upper center',
bbox_to_anchor=anchor,
ncol=numBackends)
ax1.set_ylabel(r'Time per $f(\rho,T)$ call (us)')
ax2.set_ylabel(r'Time per $f(h,p)$ call (us)')
fg1.savefig(path.join(getFigureFolder(), "TimeComp-backends-exp.pdf"))
fg2.savefig(path.join(getFigureFolder(), "TimeComp-backends-imp.pdf"))
if bp is not None:
ax1.set_ylabel(r'Time per $f(\rho,T)$ call (\si{\us})')
ax2.set_ylabel(r'Time per $f(h,p)$ call (\si{\us})')
mpl.rcParams['text.usetex'] = True
fg1.savefig(path.join(getFigureFolder(), "TimeComp-backends-exp-tex.pdf"))
fg2.savefig(path.join(getFigureFolder(), "TimeComp-backends-imp-tex.pdf"))
mpl.rcParams['text.usetex'] = False
bp.savepgf(path.join(getFigureFolder(), "TimeComp-backends-exp.pgf"), fg1, repList)
bp.savepgf(path.join(getFigureFolder(), "TimeComp-backends-imp.pgf"), fg2, repList)
plt.close('all')
#############################################################
# The third figure is a heat map of the execution times in
# log p h diagram
#############################################################
for fluidstr in fluids:
try:
_, fld, _ = splitFluid(fluidstr)
for backend in fluidData[fld]:
propsfluid = "::".join([backend, fluidstr])
if backend != "INCOMP":
TP = {}
points = max(int(xypoints / 2), 250)
T_range_TP = np.linspace(PropsSI('Ttriple', "T", 0, "D", 0, propsfluid) + 1, PropsSI('Tcrit', "T", 0, "D", 0, propsfluid) - 0.1, points)
T_TP = np.append(T_range_TP, T_range_TP[::-1])
Q_TP = np.zeros_like(T_TP)
Q_TP[points:] = 1
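                # The first half of T_TP/Q_TP traces the bubble line (Q=0) from just above
                # the triple point towards the critical point; the second half traces the
                # dew line (Q=1) back down, so both saturation branches come from one
                # PropsSI call and join near the critical point.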
points *= 2
out = ["D", "H", "P", "S"]
res = PropsSI(out, "T", T_TP, "Q", Q_TP, propsfluid)
D_TP = res[:, 0]
H_TP = res[:, 1]
P_TP = res[:, 2]
S_TP = res[:, 3]
mask = np.isfinite(D_TP)
if np.sum(mask) < points:
warn("There were not enough valid entries in your result vector. Reducing the number of points from {0:d} to {1:d}.".format(points, np.sum(mask)))
points = np.sum(mask)
TP["T"] = T_TP[mask]
TP["D"] = D_TP[mask]
TP["H"] = H_TP[mask]
TP["P"] = P_TP[mask]
TP["S"] = S_TP[mask]
# saveNpzData(TP)
else:
TP = None
state = getStateObj(propsfluid)
if backend == "HEOS" and state.has_melting_line():
p_melt = np.logspace(np.log10(state.melting_line(CoolProp.constants.iP_min, CoolProp.constants.iT, 0)), np.log10(state.melting_line(CoolProp.constants.iP_max, CoolProp.constants.iT, 0)), xypoints)
#p_melt = p_range
ML = dict(T=[], D=[], H=[], S=[], P=p_melt)
for p in p_melt:
try:
ML["T"].append(state.melting_line(CoolProp.constants.iT, CoolProp.constants.iP, p))
except Exception as ve:
ML["T"].append(np.inf)
res = PropsSI(["D", "H", "P", "S", "T"], "T", ML["T"], "P", ML["P"], propsfluid)
ML["D"] = res[:, 0]
ML["H"] = res[:, 1]
ML["P"] = res[:, 2]
ML["S"] = res[:, 3]
ML["T"] = res[:, 4]
mask = np.isfinite(ML["T"])
ML["P"] = ML["P"][mask]
ML["T"] = ML["T"][mask]
ML["D"] = ML["D"][mask]
ML["H"] = ML["H"][mask]
ML["S"] = ML["S"][mask]
else:
ML = None
#ML = {}
IP = {}
p_range, T_range = getPTRanges(propsfluid)
critProps = getCriticalProps(propsfluid)
try:
IP["T"] = T_range
IP["P"] = np.zeros_like(T_range) + critProps["P"]
res = PropsSI(["D", "H"], "T", IP["T"], "P", IP["P"], propsfluid)
IP["D"] = res[:, 0]
IP["H"] = res[:, 1]
except Exception as ve:
IP = None
IT = {}
try:
IT["P"] = p_range
IT["T"] = np.zeros_like(p_range) + critProps["T"]
res = PropsSI(["D", "H"], "T", IT["T"], "P", IT["P"], propsfluid)
IT["D"] = res[:, 0]
IT["H"] = res[:, 1]
except Exception as ve:
IT = None
ID = {}
try:
ID["T"] = T_range
ID["D"] = np.zeros_like(p_range) + critProps["D"]
res = PropsSI(["P", "H"], "T", ID["T"], "D", ID["D"], propsfluid)
ID["P"] = res[:, 0]
ID["H"] = res[:, 1]
except Exception as ve:
ID = None
IH = {}
try:
IH["P"] = p_range
IH["H"] = np.zeros_like(p_range) + critProps["H"]
res = PropsSI(["D", "T"], "P", IH["P"], "H", IH["H"], propsfluid)
IH["D"] = res[:, 0]
IH["T"] = res[:, 1]
except Exception as ve:
IH = None
IS = {}
try:
IS["P"] = p_range
IS["S"] = np.zeros_like(p_range) + critProps["S"]
res = PropsSI(["D", "H", "T"], "P", IS["P"], "S", IS["S"], propsfluid)
IS["D"] = res[:, 0]
IS["H"] = res[:, 1]
IS["T"] = res[:, 2]
except Exception as ve:
IS = None
for I in [IP, IT, ID, IH, IS]:
if I is not None:
mask = np.isfinite(I["D"]) & np.isfinite(I["H"])
if np.sum(mask) < 20: I = None
else:
for k in I:
I[k] = I[k][mask]
for inp in getInpList(backend):
if bp is not None:
bp.figure = None
fg = bp.getFigure()
else:
fg = plt.figure()
kind = getTimeKey(inp, getOutList(inp)[-1])
t_data = getSingleData(fld, backend, kind, fluidData)
x_data = getSingleData(fld, backend, "H", fluidData)
y_data = getSingleData(fld, backend, "P", fluidData)
gs = gridspec.GridSpec(1, 2, wspace=None, hspace=None, width_ratios=[10, 1])
ax1 = fg.add_subplot(gs[0, 0], axisbg='Tan')
ax1.set_yscale('log')
#ax2 = ax1.twinx()
minHP = np.min(t_data)
maxHP = np.max(t_data)
minIT = np.percentile(t_data, 10)
maxIT = np.percentile(t_data, 90)
difIT = np.log10(maxIT / minIT) * 0.25
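                # Colour-scale limits: 10th/90th percentiles of the timings, extended by a
                # quarter of the spanned decades (upwards for the explicit DT inputs below,
                # downwards for the implicit ones).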
print(kind, ": {0:7.2f} to {1:7.2f}".format(minHP, maxHP))
if kind == "DT":
if bp is not None:
cx1 = bp.getColourMap(reverse=True)
else:
cx1 = mpl.cm.get_cmap('cubehelix_r')
minHP = minIT
maxHP = np.power(10, np.log10(maxIT) + difIT)
#minHP = np.power(10,np.log10(np.percentile(t_data,10)*1e6))
#maxHP = np.power(10,np.log10(np.percentile(t_data,90)*1e6)*1.10)
#maxHP = np.power(10,1.10*np.log10(maxHP))
#minHP = np.percentile(t_data,10)*1e6
#maxHP = np.percentile(t_data,99)*1e6
#print(kind,": {0:7.2f} to {1:7.2f}".format(minHP,maxHP))
#minHP = 100
#maxHP = 20000
else:
if bp is not None:
cx1 = bp.getColourMap()
else:
cx1 = mpl.cm.get_cmap('cubehelix')
minHP = np.power(10, np.log10(minIT) - difIT)
maxHP = maxIT
#minHP = np.power(10,np.log10(np.percentile(t_data,10)*1e6)*0.90)
#maxHP = np.power(10,np.log10(np.percentile(t_data,90)*1e6))
# minHP = np.percentile(t_data,01)*1e6
#maxHP = np.percentile(t_data,90)*1e6
#print(kind,": {0:7.2f} to {1:7.2f}".format(minHP,maxHP))
#minHP = 100
#maxHP = 20000
#cx1_r = reverse_colourmap(cx1)
cNorm = mpl.colors.LogNorm(vmin=minHP, vmax=maxHP)
#cNorm = mpl.colors.LogNorm(vmin=ceil(minHP/1e1)*1e1, vmax=floor(maxHP/1e2)*1e2)
#cNorm = mpl.colors.Normalize(vmin=round(minHP,-2), vmax=round(maxHP,-2))
colourSettings = dict(c=t_data, edgecolors='none', cmap=cx1, norm=cNorm)
pointSettings = dict(s=6)
scatterSettings = dict(rasterized=True, alpha=0.5)
#scatterSettings = dict(rasterized=False, alpha=0.5)
scatterSettings.update(colourSettings)
scatterSettings.update(pointSettings)
SC = ax1.scatter(x_data / 1e6, y_data / 1e5, **scatterSettings)
for I in [TP, ML]:
if I is not None:
ax1.plot(I["H"] / 1e6, I["P"] / 1e5, lw=1.5, c='k')
for I in [IP, IT, ID, IS, IH]:
if I is not None:
ax1.plot(I["H"] / 1e6, I["P"] / 1e5, lw=1.0, c='k', alpha=1)
# ax1.set_xlim([0e+0,6e1])
# ax1.set_ylim([5e-1,2e4])
ax1.set_xlim([np.percentile(x_data / 1e6, 0.1), np.percentile(x_data / 1e6, 99.9)])
ax1.set_ylim([np.percentile(y_data / 1e5, 0.1), np.percentile(y_data / 1e5, 99.9)])
formatter = ticker.LogFormatter(base=10.0, labelOnlyBase=False)
#formatter = ticker.ScalarFormatter()
#ticks = roundList(np.logspace(np.log10(ax1.get_ylim()[0]), np.log10(ax1.get_ylim()[1]), 5))
#locator = ticker.FixedLocator(ticks)
# ax1.yaxis.set_major_locator(locator)
ax1.yaxis.set_major_formatter(formatter)
cax = fg.add_subplot(gs[0, 1])
formatter = ticker.ScalarFormatter()
CB = fg.colorbar(SC, cax=cax, format=formatter)
CB.set_alpha(1)
CB.locator = ticker.MaxNLocator(nbins=7)
#ticks = roundList(np.logspace(np.log10(minHP), np.log10(maxHP), 5))
#CB.locator = ticker.FixedLocator(ticks)
CB.update_ticks()
CB.draw_all()
# fg.suptitle("f("+inp+")-"+backend.lower()+"-"+fld.lower())
CB.set_label(backend.upper() + "::" + fld + ', execution time per f(' + inp[0] + "," + inp[1] + ') call (us)')
ax1.set_xlabel(r'Specific enthalpy (MJ/kg)')
ax1.set_ylabel(r'Pressure (bar)')
fg.tight_layout()
fg.savefig(path.join(getFigureFolder(), "TimeComp-" + inp + "-" + backend.lower() + "-" + fld.lower() + ".pdf"))
#CB.set_label(r'Execution time per call (\si{\us})')
#ax1.set_xlabel(r'Specific enthalpy (\si{\mega\J\per\kg})')
#ax1.set_ylabel(r'Pressure (\si{\bar})')
# fg.tight_layout()
# bp.savepgf(path.join(getFigureFolder(),"TimeComp-"+inp+"-"+backend.lower()+"-"+fld.lower()+".pgf"),fg,repList)
plt.close()
except Exception as e:
print(e)
pass
plt.close('all')
|
mit
|
pianomania/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # n_clusters * n_samples_per_cluster = 30 samples in total.
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that each leaf subcluster has a radius no greater than the threshold.
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
bsd-3-clause
|
waterponey/scikit-learn
|
examples/mixture/plot_gmm.py
|
122
|
3265
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number of
states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
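        # eigh returns the variances along the principal axes; the line above scales
        # their square roots by 2*sqrt(2), turning them into full ellipse axis
        # lengths (the contour at Mahalanobis distance sqrt(2)).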
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
|
bsd-3-clause
|
haiweiosu/Optical-Character-Recognition-using-Template-Matching-Object-Detection-in-Images
|
drawMatches.py
|
1
|
2594
|
import cv2  # needed for cv2.line / cv2.circle used below
import numpy as np
import matplotlib.pyplot as plt
def draw_matches(img1, kp1, img2, kp2, matches, color=None):
"""Draws lines between matching keypoints of two images.
Keypoints not in a matching pair are not drawn.
Places the images side by side in a new image and draws circles
around each keypoint, with line segments connecting matching pairs.
You can tweak the r, thickness, and figsize values as needed.
Args:
img1: An openCV image ndarray in a grayscale or color format.
kp1: A list of cv2.KeyPoint objects for img1.
img2: An openCV image ndarray of the same format and with the same
element type as img1.
kp2: A list of cv2.KeyPoint objects for img2.
matches: A list of DMatch objects whose trainIdx attribute refers to
img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
color: The color of the circles and connecting lines drawn on the images.
A 3-tuple for color images, a scalar for grayscale images. If None, these
values are randomly generated.
"""
# We're drawing them side by side. Get dimensions accordingly.
# Handle both color and grayscale images.
if len(img1.shape) == 3:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], img1.shape[2])
elif len(img1.shape) == 2:
new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1])
new_img = np.zeros(new_shape, type(img1.flat[0]))
# Place images onto the new image.
new_img[0:img1.shape[0],0:img1.shape[1]] = img1
new_img[0:img2.shape[0],img1.shape[1]:img1.shape[1]+img2.shape[1]] = img2
# Draw lines between matches. Make sure to offset kp coords in second image appropriately.
r = 15
thickness = 2
if color:
c = color
for m in matches:
# Generate random color for RGB/BGR and grayscale images as needed.
if not color:
c = np.random.randint(0,256,3) if len(img1.shape) == 3 else np.random.randint(0,256)
# So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,
# wants locs as a tuple of ints.
end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int) + np.array([img1.shape[1], 0]))
cv2.line(new_img, end1, end2, c, thickness)
cv2.circle(new_img, end1, r, c, thickness)
cv2.circle(new_img, end2, r, c, thickness)
plt.figure(figsize=(15,15))
plt.imshow(new_img)
plt.show()
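# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes OpenCV 3+
# is installed and that 'left.png' / 'right.png' are placeholder image paths;
# adjust them to your own data. Descriptors are matched with img2 as the query
# set and img1 as the train set so that DMatch.trainIdx indexes kp1 and
# DMatch.queryIdx indexes kp2, matching the convention documented above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    img1 = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)
    # Detect ORB keypoints and compute binary descriptors for both images.
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # Brute-force Hamming matcher with cross-checking; keep the 25 best matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des2, des1), key=lambda m: m.distance)[:25]
    draw_matches(img1, kp1, img2, kp2, matches)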
|
apache-2.0
|
HowDoesExcelWork/RocketMap
|
pogom/models.py
|
1
|
160339
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import itertools
import calendar
import sys
import traceback
import gc
import time
import geopy
import math
from peewee import InsertQuery, \
Check, CompositeKey, ForeignKeyField, \
SmallIntegerField, IntegerField, CharField, DoubleField, BooleanField, \
DateTimeField, fn, DeleteQuery, FloatField, SQL, TextField, JOIN, \
OperationalError
from playhouse.flask_utils import FlaskDB
from playhouse.pool import PooledMySQLDatabase
from playhouse.shortcuts import RetryOperationalError, case
from playhouse.migrate import migrate, MySQLMigrator, SqliteMigrator
from playhouse.sqlite_ext import SqliteExtDatabase
from datetime import datetime, timedelta
from base64 import b64encode
from cachetools import TTLCache
from cachetools import cached
from timeit import default_timer
from random import random
# for geofence
from matplotlib.path import Path
from ast import literal_eval
# fixing scout?
from queue import Queue, Empty
from pgoapi import PGoApi
from . import config
from .utils import get_pokemon_name, get_pokemon_rarity, get_pokemon_types, \
get_args, cellid, in_radius, date_secs, clock_between, secs_between, \
get_move_name, get_move_damage, get_move_energy, get_move_type, \
clear_dict_response, generate_device_info
from .transform import transform_from_wgs_to_gcj, get_new_coords
from .customLog import printPokemon
from .account import TooManyLoginAttempts, tutorial_pokestop_spin,\
get_player_level
log = logging.getLogger(__name__)
args = get_args()
flaskDb = FlaskDB()
cache = TTLCache(maxsize=100, ttl=60 * 5)
if len(args.cp_accountcsv) > 0:
    cp_account_queue = Queue()
    for account in args.cp_accountcsv:
        cp_account_queue.put(account)
if len(args.iv_accountcsv) > 0:
    iv_account_queue = Queue()
    for account in args.iv_accountcsv:
        iv_account_queue.put(account)
db_schema_version = 20
class MyRetryDB(RetryOperationalError, PooledMySQLDatabase):
pass
def init_database(app):
if args.db_type == 'mysql':
log.info('Connecting to MySQL database on %s:%i...',
args.db_host, args.db_port)
connections = args.db_max_connections
if hasattr(args, 'accounts'):
connections *= len(args.accounts)
db = MyRetryDB(
args.db_name,
user=args.db_user,
password=args.db_pass,
host=args.db_host,
port=args.db_port,
max_connections=connections,
stale_timeout=300)
else:
log.info('Connecting to local SQLite database')
db = SqliteExtDatabase(args.db,
pragmas=(
('journal_mode', 'WAL'),
('mmap_size', 1024 * 1024 * 32),
('cache_size', 10000),
('journal_size_limit', 1024 * 1024 * 4),))
app.config['DATABASE'] = db
flaskDb.init_app(app)
return db
class BaseModel(flaskDb.Model):
@classmethod
def get_all(cls):
results = [m for m in cls.select().dicts()]
if args.china:
for result in results:
result['latitude'], result['longitude'] = \
transform_from_wgs_to_gcj(
result['latitude'], result['longitude'])
return results
class Pokemon(BaseModel):
# We are base64 encoding the ids delivered by the api
# because they are too big for sqlite to handle.
encounter_id = CharField(primary_key=True, max_length=50)
spawnpoint_id = CharField(index=True)
pokemon_id = SmallIntegerField(index=True)
latitude = DoubleField()
longitude = DoubleField()
disappear_time = DateTimeField(index=True)
individual_attack = SmallIntegerField(null=True)
individual_defense = SmallIntegerField(null=True)
individual_stamina = SmallIntegerField(null=True)
move_1 = SmallIntegerField(null=True)
move_2 = SmallIntegerField(null=True)
weight = FloatField(null=True)
height = FloatField(null=True)
gender = SmallIntegerField(null=True)
cp = SmallIntegerField(null=True)
cp_multiplier = FloatField(null=True)
form = SmallIntegerField(null=True)
last_modified = DateTimeField(
null=True, index=True, default=datetime.utcnow)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_active(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
oSwLng=None, oNeLat=None, oNeLng=None):
now_date = datetime.utcnow()
query = Pokemon.select()
if not (swLat and swLng and neLat and neLng):
query = (query
.where(Pokemon.disappear_time > now_date)
.dicts())
elif timestamp > 0:
# If timestamp is known only load modified Pokemon.
query = (query
.where(((Pokemon.last_modified >
datetime.utcfromtimestamp(timestamp / 1000)) &
(Pokemon.disappear_time > now_date)) &
((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng)))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send Pokemon in view but exclude those within old boundaries.
# Only send newly uncovered Pokemon.
query = (query
.where(((Pokemon.disappear_time > now_date) &
(((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))) &
~((Pokemon.disappear_time > now_date) &
(Pokemon.latitude >= oSwLat) &
(Pokemon.longitude >= oSwLng) &
(Pokemon.latitude <= oNeLat) &
(Pokemon.longitude <= oNeLng))))
.dicts())
else:
query = (Pokemon
.select()
# Add 1 hour buffer to include spawnpoints that persist
# after tth, like shsh.
.where((Pokemon.disappear_time > now_date) &
(((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))))
.dicts())
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
pokemon = []
for p in list(query):
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemon.append(p)
# Re-enable the GC.
gc.enable()
return pokemon
@staticmethod
def get_active_by_id(ids, swLat, swLng, neLat, neLng):
if not (swLat and swLng and neLat and neLng):
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()))
.dicts())
else:
query = (Pokemon
.select()
.where((Pokemon.pokemon_id << ids) &
(Pokemon.disappear_time > datetime.utcnow()) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))
.dicts())
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
pokemon = []
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['pokemon_rarity'] = get_pokemon_rarity(p['pokemon_id'])
p['pokemon_types'] = get_pokemon_types(p['pokemon_id'])
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokemon.append(p)
# Re-enable the GC.
gc.enable()
return pokemon
@classmethod
@cached(cache)
def get_seen(cls, timediff):
if timediff:
timediff = datetime.utcnow() - timediff
pokemon_count_query = (Pokemon
.select(Pokemon.pokemon_id,
fn.COUNT(Pokemon.pokemon_id).alias(
'count'),
fn.MAX(Pokemon.disappear_time).alias(
'lastappeared')
)
.where(Pokemon.disappear_time > timediff)
.group_by(Pokemon.pokemon_id)
.alias('counttable')
)
query = (Pokemon
.select(Pokemon.pokemon_id,
Pokemon.disappear_time,
Pokemon.latitude,
Pokemon.longitude,
pokemon_count_query.c.count)
.join(pokemon_count_query,
on=(Pokemon.pokemon_id ==
pokemon_count_query.c.pokemon_id))
.distinct()
.where(Pokemon.disappear_time ==
pokemon_count_query.c.lastappeared)
.dicts()
)
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
pokemon = []
total = 0
for p in query:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
pokemon.append(p)
total += p['count']
# Re-enable the GC.
gc.enable()
return {'pokemon': pokemon, 'total': total}
@classmethod
def get_appearances(cls, pokemon_id, timediff):
'''
:param pokemon_id: id of Pokemon that we need appearances for
:param timediff: limiting period of the selection
:return: list of Pokemon appearances over a selected period
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.latitude, Pokemon.longitude,
Pokemon.pokemon_id,
fn.Count(Pokemon.spawnpoint_id).alias('count'),
Pokemon.spawnpoint_id)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.disappear_time > timediff)
)
.group_by(Pokemon.latitude, Pokemon.longitude,
Pokemon.pokemon_id, Pokemon.spawnpoint_id)
.dicts()
)
return list(query)
@classmethod
def get_appearances_times_by_spawnpoint(cls, pokemon_id,
spawnpoint_id, timediff):
'''
:param pokemon_id: id of Pokemon that we need appearances times for.
:param spawnpoint_id: spawnpoint id we need appearances times for.
:param timediff: limiting period of the selection.
:return: list of time appearances over a selected period.
'''
if timediff:
timediff = datetime.utcnow() - timediff
query = (Pokemon
.select(Pokemon.disappear_time)
.where((Pokemon.pokemon_id == pokemon_id) &
(Pokemon.spawnpoint_id == spawnpoint_id) &
(Pokemon.disappear_time > timediff)
)
.order_by(Pokemon.disappear_time.asc())
.tuples()
)
return list(itertools.chain(*query))
@classmethod
def get_spawn_time(cls, disappear_time):
return (disappear_time + 2700) % 3600
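    # Example (assuming the 15-minute spawn duration used throughout this module):
    # a Pokemon disappearing 600 s into the hour appeared at
    # (600 + 2700) % 3600 = 3300 s, i.e. 55 minutes into the previous hour.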
@classmethod
def get_spawnpoints(cls, swLat, swLng, neLat, neLng, timestamp=0,
oSwLat=None, oSwLng=None, oNeLat=None, oNeLng=None):
query = (Pokemon
.select(Pokemon.latitude, Pokemon.longitude,
Pokemon.spawnpoint_id,
(date_secs(Pokemon.disappear_time)).alias('time'),
fn.Count(Pokemon.spawnpoint_id).alias('count')))
if timestamp > 0:
query = (query
.where(((Pokemon.last_modified >
datetime.utcfromtimestamp(timestamp / 1000))) &
((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng)))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send spawnpoints in view but exclude those within old boundaries.
# Only send newly uncovered spawnpoints.
query = (query
.where((((Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.latitude <= neLat) &
(Pokemon.longitude <= neLng))) &
~((Pokemon.latitude >= oSwLat) &
(Pokemon.longitude >= oSwLng) &
(Pokemon.latitude <= oNeLat) &
(Pokemon.longitude <= oNeLng)))
.dicts())
elif swLat and swLng and neLat and neLng:
query = (query
.where((Pokemon.latitude <= neLat) &
(Pokemon.latitude >= swLat) &
(Pokemon.longitude >= swLng) &
(Pokemon.longitude <= neLng)
))
query = query.group_by(Pokemon.latitude, Pokemon.longitude,
Pokemon.spawnpoint_id, SQL('time'))
queryDict = query.dicts()
spawnpoints = {}
for sp in queryDict:
key = sp['spawnpoint_id']
disappear_time = cls.get_spawn_time(sp.pop('time'))
count = int(sp['count'])
if key not in spawnpoints:
spawnpoints[key] = sp
else:
spawnpoints[key]['special'] = True
if ('time' not in spawnpoints[key] or
count >= spawnpoints[key]['count']):
spawnpoints[key]['time'] = disappear_time
spawnpoints[key]['count'] = count
# Helping out the GC.
for sp in spawnpoints.values():
del sp['count']
return list(spawnpoints.values())
@classmethod
def get_spawnpoints_in_hex(cls, center, steps):
log.info('Finding spawnpoints {} steps away.'.format(steps))
n, e, s, w = hex_bounds(center, steps)
query = (Pokemon
.select(Pokemon.latitude.alias('lat'),
Pokemon.longitude.alias('lng'),
(date_secs(Pokemon.disappear_time)).alias('time'),
Pokemon.spawnpoint_id
))
query = (query.where((Pokemon.latitude <= n) &
(Pokemon.latitude >= s) &
(Pokemon.longitude >= w) &
(Pokemon.longitude <= e)
))
# Sqlite doesn't support distinct on columns.
if args.db_type == 'mysql':
query = query.distinct(Pokemon.spawnpoint_id)
else:
query = query.group_by(Pokemon.spawnpoint_id)
s = list(query.dicts())
# The distance between scan circles of radius 70 in a hex is 121.2436
# steps - 1 to account for the center circle then add 70 for the edge.
step_distance = ((steps - 1) * 121.2436) + 70
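        # (121.2436 m is 70 m * sqrt(3): the centre-to-centre spacing of adjacent
        # scan circles of radius 70 m in a hexagonal packing.)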
# Compare spawnpoint list to a circle with radius steps * 120.
# Uses the direct geopy distance between the center and the spawnpoint.
filtered = []
for idx, sp in enumerate(s):
if geopy.distance.distance(
center, (sp['lat'], sp['lng'])).meters <= step_distance:
filtered.append(s[idx])
# At this point, 'time' is DISAPPEARANCE time, we're going to morph it
# to APPEARANCE time accounting for hour wraparound.
for location in filtered:
# todo: this DOES NOT ACCOUNT for Pokemon that appear sooner and
# live longer, but you'll _always_ have at least 15 minutes, so it
# works well enough.
location['time'] = cls.get_spawn_time(location['time'])
return filtered
class Pokestop(BaseModel):
pokestop_id = CharField(primary_key=True, max_length=50)
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
lure_expiration = DateTimeField(null=True, index=True)
active_fort_modifier = CharField(max_length=50, null=True, index=True)
last_updated = DateTimeField(
null=True, index=True, default=datetime.utcnow)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_stops(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
oSwLng=None, oNeLat=None, oNeLng=None, lured=False):
query = Pokestop.select(Pokestop.active_fort_modifier,
Pokestop.enabled, Pokestop.latitude,
Pokestop.longitude, Pokestop.last_modified,
Pokestop.lure_expiration, Pokestop.pokestop_id)
if not (swLat and swLng and neLat and neLng):
query = (query
.dicts())
elif timestamp > 0:
query = (query
.where(((Pokestop.last_updated >
datetime.utcfromtimestamp(timestamp / 1000))) &
(Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng and lured:
query = (query
.where((((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng)) &
(Pokestop.active_fort_modifier.is_null(False))) &
~((Pokestop.latitude >= oSwLat) &
(Pokestop.longitude >= oSwLng) &
(Pokestop.latitude <= oNeLat) &
(Pokestop.longitude <= oNeLng)) &
(Pokestop.active_fort_modifier.is_null(False)))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send stops in view but exclude those within old boundaries. Only
# send newly uncovered stops.
query = (query
.where(((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng)) &
~((Pokestop.latitude >= oSwLat) &
(Pokestop.longitude >= oSwLng) &
(Pokestop.latitude <= oNeLat) &
(Pokestop.longitude <= oNeLng)))
.dicts())
elif lured:
query = (query
.where(((Pokestop.last_updated >
datetime.utcfromtimestamp(timestamp / 1000))) &
((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng)) &
(Pokestop.active_fort_modifier.is_null(False)))
.dicts())
else:
query = (query
.where((Pokestop.latitude >= swLat) &
(Pokestop.longitude >= swLng) &
(Pokestop.latitude <= neLat) &
(Pokestop.longitude <= neLng))
.dicts())
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
pokestops = []
for p in query:
if args.china:
p['latitude'], p['longitude'] = \
transform_from_wgs_to_gcj(p['latitude'], p['longitude'])
pokestops.append(p)
# Re-enable the GC.
gc.enable()
return pokestops
class Gym(BaseModel):
UNCONTESTED = 0
TEAM_MYSTIC = 1
TEAM_VALOR = 2
TEAM_INSTINCT = 3
gym_id = CharField(primary_key=True, max_length=50)
team_id = SmallIntegerField()
guard_pokemon_id = SmallIntegerField()
gym_points = IntegerField()
enabled = BooleanField()
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True)
last_scanned = DateTimeField(default=datetime.utcnow, index=True)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
@staticmethod
def get_gyms(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
oSwLng=None, oNeLat=None, oNeLng=None):
if not (swLat and swLng and neLat and neLng):
results = (Gym
.select()
.dicts())
elif timestamp > 0:
# If timestamp is known only send last scanned Gyms.
results = (Gym
.select()
.where(((Gym.last_scanned >
datetime.utcfromtimestamp(timestamp / 1000)) &
(Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng)))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send gyms in view but exclude those within old boundaries. Only
# send newly uncovered gyms.
results = (Gym
.select()
.where(((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng)) &
~((Gym.latitude >= oSwLat) &
(Gym.longitude >= oSwLng) &
(Gym.latitude <= oNeLat) &
(Gym.longitude <= oNeLng)))
.dicts())
else:
results = (Gym
.select()
.where((Gym.latitude >= swLat) &
(Gym.longitude >= swLng) &
(Gym.latitude <= neLat) &
(Gym.longitude <= neLng))
.dicts())
# Performance: disable the garbage collector prior to creating a
# (potentially) large dict with append().
gc.disable()
gyms = {}
gym_ids = []
for g in results:
g['name'] = None
g['pokemon'] = []
gyms[g['gym_id']] = g
gym_ids.append(g['gym_id'])
if len(gym_ids) > 0:
pokemon = (GymMember
.select(
GymMember.gym_id,
GymPokemon.cp.alias('pokemon_cp'),
GymPokemon.pokemon_id,
Trainer.name.alias('trainer_name'),
Trainer.level.alias('trainer_level'))
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon, on=(GymMember.pokemon_uid ==
GymPokemon.pokemon_uid))
.join(Trainer, on=(GymPokemon.trainer_name ==
Trainer.name))
.where(GymMember.gym_id << gym_ids)
.where(GymMember.last_scanned > Gym.last_modified)
.order_by(GymMember.gym_id, GymPokemon.cp)
.distinct()
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
gyms[p['gym_id']]['pokemon'].append(p)
details = (GymDetails
.select(
GymDetails.gym_id,
GymDetails.name)
.where(GymDetails.gym_id << gym_ids)
.dicts())
for d in details:
gyms[d['gym_id']]['name'] = d['name']
# Re-enable the GC.
gc.enable()
return gyms
@staticmethod
def get_gym(id):
result = (Gym
.select(Gym.gym_id,
Gym.team_id,
GymDetails.name,
GymDetails.description,
Gym.guard_pokemon_id,
Gym.gym_points,
Gym.latitude,
Gym.longitude,
Gym.last_modified,
Gym.last_scanned)
.join(GymDetails, JOIN.LEFT_OUTER,
on=(Gym.gym_id == GymDetails.gym_id))
.where(Gym.gym_id == id)
.dicts()
.get())
result['guard_pokemon_name'] = get_pokemon_name(
result['guard_pokemon_id']) if result['guard_pokemon_id'] else ''
result['pokemon'] = []
pokemon = (GymMember
.select(GymPokemon.cp.alias('pokemon_cp'),
GymPokemon.pokemon_id,
GymPokemon.pokemon_uid,
GymPokemon.move_1,
GymPokemon.move_2,
GymPokemon.iv_attack,
GymPokemon.iv_defense,
GymPokemon.iv_stamina,
Trainer.name.alias('trainer_name'),
Trainer.level.alias('trainer_level'))
.join(Gym, on=(GymMember.gym_id == Gym.gym_id))
.join(GymPokemon,
on=(GymMember.pokemon_uid == GymPokemon.pokemon_uid))
.join(Trainer, on=(GymPokemon.trainer_name == Trainer.name))
.where(GymMember.gym_id == id)
.where(GymMember.last_scanned > Gym.last_modified)
.order_by(GymPokemon.cp.desc())
.distinct()
.dicts())
for p in pokemon:
p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
p['move_1_name'] = get_move_name(p['move_1'])
p['move_1_damage'] = get_move_damage(p['move_1'])
p['move_1_energy'] = get_move_energy(p['move_1'])
p['move_1_type'] = get_move_type(p['move_1'])
p['move_2_name'] = get_move_name(p['move_2'])
p['move_2_damage'] = get_move_damage(p['move_2'])
p['move_2_energy'] = get_move_energy(p['move_2'])
p['move_2_type'] = get_move_type(p['move_2'])
result['pokemon'].append(p)
return result
class LocationAltitude(BaseModel):
cellid = CharField(primary_key=True, max_length=50)
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(index=True, default=datetime.utcnow,
null=True)
altitude = DoubleField()
class Meta:
indexes = ((('latitude', 'longitude'), False),)
# DB format of a new location altitude
@staticmethod
def new_loc(loc, altitude):
return {'cellid': cellid(loc),
'latitude': loc[0],
'longitude': loc[1],
'altitude': altitude}
# find a nearby altitude from the db
# looking for one within 140m
@classmethod
def get_nearby_altitude(cls, loc):
n, e, s, w = hex_bounds(loc, radius=0.14) # 140m
# Get all location altitudes in that box.
query = (cls
.select()
.where((cls.latitude <= n) &
(cls.latitude >= s) &
(cls.longitude >= w) &
(cls.longitude <= e))
.dicts())
altitude = None
if len(list(query)):
altitude = query[0]['altitude']
return altitude
@classmethod
def save_altitude(cls, loc, altitude):
InsertQuery(cls, rows=[cls.new_loc(loc, altitude)]).upsert().execute()
class ScannedLocation(BaseModel):
cellid = CharField(primary_key=True, max_length=50)
latitude = DoubleField()
longitude = DoubleField()
last_modified = DateTimeField(
index=True, default=datetime.utcnow, null=True)
# Marked true when all five bands have been completed.
done = BooleanField(default=False)
# Five scans/hour is required to catch all spawns.
# Each scan must be at least 12 minutes from the previous check,
# with a 2 minute window during which the scan can be done.
# Default of -1 is for bands not yet scanned.
band1 = SmallIntegerField(default=-1)
band2 = SmallIntegerField(default=-1)
band3 = SmallIntegerField(default=-1)
band4 = SmallIntegerField(default=-1)
band5 = SmallIntegerField(default=-1)
# midpoint is the center of the bands relative to band 1.
# If band 1 is 10.4 minutes, and band 4 is 34.0 minutes, midpoint
# is -0.2 minutes in minsec. Extra 10 seconds in case of delay in
# recording now time.
midpoint = SmallIntegerField(default=0)
# width is how wide the valid window is. Default is 0, max is 2 minutes.
    # If band 1 is 10.4 minutes, and band 4 is 34.0 minutes, the width
    # is 0.4 minutes in minsec.
width = SmallIntegerField(default=0)
class Meta:
indexes = ((('latitude', 'longitude'), False),)
constraints = [Check('band1 >= -1'), Check('band1 < 3600'),
Check('band2 >= -1'), Check('band2 < 3600'),
Check('band3 >= -1'), Check('band3 < 3600'),
Check('band4 >= -1'), Check('band4 < 3600'),
Check('band5 >= -1'), Check('band5 < 3600'),
Check('midpoint >= -130'), Check('midpoint <= 130'),
Check('width >= 0'), Check('width <= 130')]
@staticmethod
def get_recent(swLat, swLng, neLat, neLng, timestamp=0, oSwLat=None,
oSwLng=None, oNeLat=None, oNeLng=None):
activeTime = (datetime.utcnow() - timedelta(minutes=15))
if timestamp > 0:
query = (ScannedLocation
.select()
.where(((ScannedLocation.last_modified >=
datetime.utcfromtimestamp(timestamp / 1000))) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng))
.dicts())
elif oSwLat and oSwLng and oNeLat and oNeLng:
# Send scannedlocations in view but exclude those within old
# boundaries. Only send newly uncovered scannedlocations.
query = (ScannedLocation
.select()
.where((((ScannedLocation.last_modified >= activeTime)) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng)) &
~(((ScannedLocation.last_modified >= activeTime)) &
(ScannedLocation.latitude >= oSwLat) &
(ScannedLocation.longitude >= oSwLng) &
(ScannedLocation.latitude <= oNeLat) &
(ScannedLocation.longitude <= oNeLng)))
.dicts())
else:
query = (ScannedLocation
.select()
.where((ScannedLocation.last_modified >= activeTime) &
(ScannedLocation.latitude >= swLat) &
(ScannedLocation.longitude >= swLng) &
(ScannedLocation.latitude <= neLat) &
(ScannedLocation.longitude <= neLng))
.order_by(ScannedLocation.last_modified.asc())
.dicts())
return list(query)
# DB format of a new location.
@staticmethod
def new_loc(loc):
return {'cellid': cellid(loc),
'latitude': loc[0],
'longitude': loc[1],
'done': False,
'band1': -1,
'band2': -1,
'band3': -1,
'band4': -1,
'band5': -1,
'width': 0,
'midpoint': 0,
'last_modified': None}
# Used to update bands.
@staticmethod
def db_format(scan, band, nowms):
scan.update({'band' + str(band): nowms})
scan['done'] = reduce(lambda x, y: x and (
scan['band' + str(y)] > -1), range(1, 6), True)
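        # 'done' becomes True only once all five band fields (band1..band5)
        # hold a recorded time (> -1).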
return scan
# Shorthand helper for DB dict.
@staticmethod
def _q_init(scan, start, end, kind, sp_id=None):
return {'loc': scan['loc'], 'kind': kind, 'start': start, 'end': end,
'step': scan['step'], 'sp': sp_id}
@classmethod
def get_by_cellids(cls, cellids):
query = (cls
.select()
.where(cls.cellid << cellids)
.dicts())
d = {}
for sl in list(query):
key = "{}".format(sl['cellid'])
d[key] = sl
return d
@classmethod
def find_in_locs(cls, loc, locs):
key = "{}".format(cellid(loc))
return locs[key] if key in locs else cls.new_loc(loc)
# Return value of a particular scan from loc, or default dict if not found.
@classmethod
def get_by_loc(cls, loc):
query = (cls
.select()
.where(cls.cellid == cellid(loc))
.dicts())
return query[0] if len(list(query)) else cls.new_loc(loc)
# Check if spawnpoints in a list are in any of the existing
# ScannedLocation records. Otherwise, search through the spawnpoint list
# and update scan_spawn_point dict for DB bulk upserting.
@classmethod
def link_spawn_points(cls, scans, initial, spawn_points, distance,
scan_spawn_point, force=False):
for cell, scan in scans.iteritems():
if initial[cell]['done'] and not force:
continue
# 70m at the equator is about 0.00063 degrees of longitude, and that
# figure grows the further north or south you go (hence the
# cos(latitude) correction below).
deg_at_lat = 0.0007 / math.cos(math.radians(scan['loc'][0]))
for sp in spawn_points:
if (abs(sp['latitude'] - scan['loc'][0]) > 0.0008 or
abs(sp['longitude'] - scan['loc'][1]) > deg_at_lat):
continue
if in_radius((sp['latitude'], sp['longitude']),
scan['loc'], distance):
scan_spawn_point[cell + sp['id']] = {
'spawnpoint': sp['id'],
'scannedlocation': cell}
# Return the spawn points linked to the given cell.
@classmethod
def linked_spawn_points(cls, cell):
# Unable to use a normal join, since MySQL produces foreignkey
# constraint errors when trying to upsert fields that are foreignkeys
# on another table
query = (SpawnPoint
.select()
.join(ScanSpawnPoint)
.join(cls)
.where(cls.cellid == cell).dicts())
return list(query)
# Return a dict mapping each cell id to its linked spawn points.
@classmethod
def get_cell_to_linked_spawn_points(cls, cellids, location_change_date):
# Get all spawnpoints from the hive's cells
sp_from_cells = (ScanSpawnPoint
.select(ScanSpawnPoint.spawnpoint)
.where(ScanSpawnPoint.scannedlocation << cellids)
.alias('spcells'))
# Allocate a spawnpoint to one cell only; this can either be
# a new SL (new ones are created when the location changes) or
# a cell from another active hive.
one_sp_scan = (ScanSpawnPoint
.select(ScanSpawnPoint.spawnpoint,
fn.MAX(ScanSpawnPoint.scannedlocation).alias(
'cellid'))
.join(sp_from_cells, on=sp_from_cells.c.spawnpoint_id
== ScanSpawnPoint.spawnpoint)
.join(cls, on=(cls.cellid ==
ScanSpawnPoint.scannedlocation))
.where(((cls.last_modified >= (location_change_date)) &
(cls.last_modified > (
datetime.utcnow() - timedelta(minutes=60)))) |
(cls.cellid << cellids))
.group_by(ScanSpawnPoint.spawnpoint)
.alias('maxscan'))
# As scan locations overlap, spawnpoints can belong to up to 3 locations.
# This sub-query effectively assigns each SP to exactly one location.
query = (SpawnPoint
.select(SpawnPoint, one_sp_scan.c.cellid)
.join(one_sp_scan, on=(SpawnPoint.id ==
one_sp_scan.c.spawnpoint_id))
.where(one_sp_scan.c.cellid << cellids)
.dicts())
l = list(query)
ret = {}
for item in l:
if item['cellid'] not in ret:
ret[item['cellid']] = []
ret[item['cellid']].append(item)
return ret
@staticmethod
def visible_forts(step_location):
distance = 0.45
n, e, s, w = hex_bounds(step_location, radius=distance * 1000)
for g in Gym.get_gyms(s, w, n, e).values():
if in_radius((g['latitude'], g['longitude']), step_location,
distance):
return True
for g in Pokestop.get_stops(s, w, n, e):
if in_radius((g['latitude'], g['longitude']), step_location,
distance):
return True
return False
# Return list of dicts for upcoming valid band times.
@classmethod
def get_times(cls, scan, now_date, scanned_locations):
s = cls.find_in_locs(scan['loc'], scanned_locations)
if s['done']:
return []
max = 3600 * 2 + 250 # Greater than maximum possible value.
min = {'end': max}
nowms = date_secs(now_date)
if s['band1'] == -1:
return [cls._q_init(scan, nowms, nowms + 3599, 'band')]
# Find next window.
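# Illustrative example: with band1 = 600, midpoint = 0, width = 0 and
# band3 still unfilled, radius = 120 and
# end = (600 + 0 + 120 + 2 * 720 - 10) % 3600 = 2150, so the queued
# window is [end - 2 * radius + 10, end] = [1920, 2150] seconds after
# the hour.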
basems = s['band1']
for i in range(2, 6):
ms = s['band' + str(i)]
# Skip bands already done.
if ms > -1:
continue
radius = 120 - s['width'] / 2
end = (basems + s['midpoint'] + radius + (i - 1) * 720 - 10) % 3600
end = end if end >= nowms else end + 3600
if end < min['end']:
min = cls._q_init(scan, end - radius * 2 + 10, end, 'band')
return [min] if min['end'] < max else []
# Checks if now falls within an unfilled band for a scanned location.
# Returns the updated scan location dict.
@classmethod
def update_band(cls, scan, now_date):
scan['last_modified'] = now_date
if scan['done']:
return scan
now_secs = date_secs(now_date)
if scan['band1'] == -1:
return cls.db_format(scan, 1, now_secs)
# Calculate if number falls in band with remaining points.
basems = scan['band1']
delta = (now_secs - basems - scan['midpoint']) % 3600
band = int(round(delta / 12 / 60.0) % 5) + 1
# Check if that band is already filled.
if scan['band' + str(band)] > -1:
return scan
# Check if this result falls within the band's 2 minute window.
offset = (delta + 1080) % 720 - 360
if abs(offset) > 120 - scan['width'] / 2:
return scan
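# Illustrative numbers for the checks above: with band1 = 600 and a
# sighting at now_secs = 1410, delta = 810, band = 2 and offset = 90,
# i.e. 90 seconds past the band's nominal center and inside the default
# +/- 120 second window.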
# Find band midpoint/width.
scan = cls.db_format(scan, band, now_secs)
bts = [scan['band' + str(i)] for i in range(1, 6)]
bts = filter(lambda ms: ms > -1, bts)
bts_delta = map(lambda ms: (ms - basems) % 3600, bts)
bts_offsets = map(lambda ms: (ms + 1080) % 720 - 360, bts_delta)
min_scan = min(bts_offsets)
max_scan = max(bts_offsets)
scan['width'] = max_scan - min_scan
scan['midpoint'] = (max_scan + min_scan) / 2
return scan
@classmethod
def get_bands_filled_by_cellids(cls, cellids):
return int(cls
.select(fn.SUM(case(cls.band1, ((-1, 0),), 1)
+ case(cls.band2, ((-1, 0),), 1)
+ case(cls.band3, ((-1, 0),), 1)
+ case(cls.band4, ((-1, 0),), 1)
+ case(cls.band5, ((-1, 0),), 1))
.alias('band_count'))
.where(cls.cellid << cellids)
.scalar() or 0)
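# E.g. (illustrative) a cell with band1..band3 filled and band4/band5
# still -1 contributes 3 to the sum above.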
@classmethod
def reset_bands(cls, scan_loc):
scan_loc['done'] = False
scan_loc['last_modified'] = datetime.utcnow()
for i in range(1, 6):
scan_loc['band' + str(i)] = -1
@classmethod
def select_in_hex(cls, locs):
if locs == []:
log.warning('CAUGHT THE BLANK LOCATION, DON\'T TRY AND GET SPAWNS')
in_hex = []
return in_hex
# There should be a way to delegate this to SpawnPoint.select_in_hex,
# but w/e.
cells = []
for i, e in enumerate(locs):
cells.append(cellid(e[1]))
# Get all spawns for the locations.
sp = list(cls
.select()
.where(cls.cellid << cells)
.dicts())
# No hex clipping is applied here; every location in the selected cells is kept.
in_hex = []
for spawn in sp:
in_hex.append(spawn)
return in_hex
class MainWorker(BaseModel):
worker_name = CharField(primary_key=True, max_length=50)
message = TextField(null=True, default="")
method = CharField(max_length=50)
last_modified = DateTimeField(index=True)
accounts_working = IntegerField()
accounts_captcha = IntegerField()
accounts_failed = IntegerField()
@staticmethod
def get_total_captchas():
return MainWorker.select(fn.SUM(MainWorker.accounts_captcha)).scalar()
@staticmethod
def get_account_stats():
account_stats = (MainWorker
.select(fn.SUM(MainWorker.accounts_working),
fn.SUM(MainWorker.accounts_captcha),
fn.SUM(MainWorker.accounts_failed))
.scalar(as_tuple=True))
dict = {'working': 0, 'captcha': 0, 'failed': 0}
if account_stats[0] is not None:
dict = {'working': int(account_stats[0]),
'captcha': int(account_stats[1]),
'failed': int(account_stats[2])}
return dict
class WorkerStatus(BaseModel):
username = CharField(primary_key=True, max_length=50)
worker_name = CharField(index=True, max_length=50)
success = IntegerField()
fail = IntegerField()
no_items = IntegerField()
skip = IntegerField()
captcha = IntegerField()
last_modified = DateTimeField(index=True)
message = CharField(max_length=255)
last_scan_date = DateTimeField(index=True)
latitude = DoubleField(null=True)
longitude = DoubleField(null=True)
@staticmethod
def db_format(status, name='status_worker_db'):
status['worker_name'] = status.get('worker_name', name)
return {'username': status['username'],
'worker_name': status['worker_name'],
'success': status['success'],
'fail': status['fail'],
'no_items': status['noitems'],
'skip': status['skip'],
'captcha': status['captcha'],
'last_modified': datetime.utcnow(),
'message': status['message'],
'last_scan_date': status.get('last_scan_date',
datetime.utcnow()),
'latitude': status.get('latitude', None),
'longitude': status.get('longitude', None)}
@staticmethod
def get_recent():
query = (WorkerStatus
.select()
.where((WorkerStatus.last_modified >=
(datetime.utcnow() - timedelta(minutes=5))))
.order_by(WorkerStatus.username)
.dicts())
status = []
for s in query:
status.append(s)
return status
@staticmethod
def get_worker(username, loc=False):
query = (WorkerStatus
.select()
.where((WorkerStatus.username == username))
.dicts())
# Sometimes it appears peewee is slow to load, and this produces
# an exception. Retry after a second to give peewee time to load.
while True:
try:
result = query[0] if len(query) else {
'username': username,
'success': 0,
'fail': 0,
'no_items': 0,
'skip': 0,
'last_modified': datetime.utcnow(),
'message': 'New account {} loaded'.format(username),
'last_scan_date': datetime.utcnow(),
'latitude': loc[0] if loc else None,
'longitude': loc[1] if loc else None
}
break
except Exception as e:
log.error('Exception in get_worker under account {}. '
'Exception message: {}'.format(username, repr(e)))
traceback.print_exc(file=sys.stdout)
time.sleep(1)
return result
class SpawnPoint(BaseModel):
id = CharField(primary_key=True, max_length=50)
latitude = DoubleField()
longitude = DoubleField()
last_scanned = DateTimeField(index=True)
# kind gives the four quartiles of the spawn, as 's' for seen
# or 'h' for hidden. For example, a 30 minute spawn is 'hhss'.
kind = CharField(max_length=4, default='hhhs')
# links shows whether a Pokemon encounter id changes between quartiles or
# stays the same. Both 1x45 and 1x60h3 have the kind of 'sssh', but the
# different links shows when the encounter id changes. Same encounter id
# is shared between two quartiles, links shows a '+'. A different
# encounter id between two quartiles is a '-'.
#
# For the hidden times, an 'h' is used. Until determined, '?' is used.
# Note index is shifted by a half. links[0] is the link between
# kind[0] and kind[1] and so on. links[3] is the link between
# kind[3] and kind[0]
links = CharField(max_length=4, default='????')
# Count consecutive times spawn should have been seen, but wasn't.
# If too high, will not be scheduled for review, and treated as inactive.
missed_count = IntegerField(default=0)
# Next 2 fields are to narrow down on the valid TTH window.
# Seconds after the hour of the latest Pokemon seen time within the hour.
latest_seen = SmallIntegerField()
# Seconds after the hour of the earliest time Pokemon wasn't seen after an
# appearance.
earliest_unseen = SmallIntegerField()
class Meta:
indexes = ((('latitude', 'longitude'), False),)
constraints = [Check('earliest_unseen >= 0'),
Check('earliest_unseen < 3600'),
Check('latest_seen >= 0'), Check('latest_seen < 3600')]
# Returns the spawnpoint dict from ID, or a new dict if not found.
@classmethod
def get_by_id(cls, id, latitude=0, longitude=0):
query = (cls
.select()
.where(cls.id == id)
.dicts())
return query[0] if query else {
'id': id,
'latitude': latitude,
'longitude': longitude,
'last_scanned': None, # Null value used as new flag.
'kind': 'hhhs',
'links': '????',
'missed_count': 0,
'latest_seen': None,
'earliest_unseen': None
}
# Confirm if tth has been found.
@staticmethod
def tth_found(sp):
# Fully identified if no '?' in links and
# latest_seen == earliest_unseen.
return sp['latest_seen'] == sp['earliest_unseen']
# Return [start, end] in seconds after the hour for the spawn, despawn
# time of a spawnpoint.
@classmethod
def start_end(cls, sp, spawn_delay=0, links=False):
links_arg = links
links = links if links else str(sp['links'])
if links == '????': # Clean up for old data.
links = str(sp['kind'].replace('s', '?'))
# Make some assumptions if link not fully identified.
if links.count('-') == 0:
links = links[:-1] + '-'
links = links.replace('?', '+')
links = links[:-1] + '-'
plus_or_minus = links.index(
'+') if links.count('+') else links.index('-')
start = sp['earliest_unseen'] - (4 - plus_or_minus) * 900 + spawn_delay
no_tth_adjust = 60 if not links_arg and not cls.tth_found(sp) else 0
end = sp['latest_seen'] - (3 - links.index('-')) * 900 + no_tth_adjust
return [start % 3600, end % 3600]
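# Illustrative example: a 30 minute spawn ('hhss') whose TTH is known,
# with latest_seen == earliest_unseen == 1200, cleans up to links
# 'hh+-', so plus_or_minus = 2, start = (1200 - 2 * 900) % 3600 = 3000
# and end = 1200: the Pokemon is up from :50 past the hour until :20.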
# Return a list of dicts with the next spawn times.
@classmethod
def get_times(cls, cell, scan, now_date, scan_delay,
cell_to_linked_spawn_points, sp_by_id):
l = []
now_secs = date_secs(now_date)
linked_spawn_points = (cell_to_linked_spawn_points[cell]
if cell in cell_to_linked_spawn_points else [])
for sp in linked_spawn_points:
if sp['missed_count'] > 5:
continue
endpoints = SpawnPoint.start_end(sp, scan_delay)
cls.add_if_not_scanned('spawn', l, sp, scan,
endpoints[0], endpoints[1], now_date,
now_secs, sp_by_id)
# Check to see if still searching for valid TTH.
if cls.tth_found(sp):
continue
# Add a spawnpoint check between latest_seen and earliest_unseen.
start = sp['latest_seen']
end = sp['earliest_unseen']
# If the gap between start and end is less than 89 seconds, widen it
# to 89 seconds.
if ((end > start and end - start < 89) or
(start > end and (end + 3600) - start < 89)):
end = (start + 89) % 3600
# Move the search window to start 45 seconds after latest_seen, so it
# falls within 45 to 89 seconds from the last scan. The TTH appears in
# the last 90 seconds of the spawn.
start = sp['latest_seen'] + 45
cls.add_if_not_scanned('TTH', l, sp, scan,
start, end, now_date, now_secs, sp_by_id)
return l
@classmethod
def add_if_not_scanned(cls, kind, l, sp, scan, start,
end, now_date, now_secs, sp_by_id):
# Make sure later than now_secs.
while end < now_secs:
start, end = start + 3600, end + 3600
# Ensure start before end.
while start > end:
start -= 3600
while start < 0:
start, end = start + 3600, end + 3600
last_scanned = sp_by_id[sp['id']]['last_scanned']
if ((now_date - last_scanned).total_seconds() > now_secs - start):
l.append(ScannedLocation._q_init(scan, start, end, kind, sp['id']))
# Given seconds after the hour and a spawnpoint dict, return which quartile
# of the spawnpoint the secs falls in.
@staticmethod
def get_quartile(secs, sp):
return int(((secs - sp['earliest_unseen'] + 15 * 60 + 3600 - 1) %
3600) / 15 / 60)
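# E.g. (illustrative) with earliest_unseen = 1800 and secs = 2100:
# int(((2100 - 1800 + 900 + 3599) % 3600) / 900) = int(1199 / 900) = 1.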
@classmethod
def select_in_hex_by_cellids(cls, cellids, location_change_date):
# Get all spawnpoints from the hive's cells
sp_from_cells = (ScanSpawnPoint
.select(ScanSpawnPoint.spawnpoint)
.where(ScanSpawnPoint.scannedlocation << cellids)
.alias('spcells'))
# Allocate a spawnpoint to one cell only, this can either be
# A new SL (new ones are created when the location changes) or
# it can be a cell from another active hive
one_sp_scan = (ScanSpawnPoint
.select(ScanSpawnPoint.spawnpoint,
fn.MAX(ScanSpawnPoint.scannedlocation).alias(
'Max_ScannedLocation_id'))
.join(sp_from_cells, on=sp_from_cells.c.spawnpoint_id
== ScanSpawnPoint.spawnpoint)
.join(
ScannedLocation,
on=(ScannedLocation.cellid
== ScanSpawnPoint.scannedlocation))
.where(((ScannedLocation.last_modified
>= (location_change_date)) & (
ScannedLocation.last_modified > (
datetime.utcnow() - timedelta(minutes=60)))) |
(ScannedLocation.cellid << cellids))
.group_by(ScanSpawnPoint.spawnpoint)
.alias('maxscan'))
query = (cls
.select(cls)
.join(one_sp_scan,
on=(one_sp_scan.c.spawnpoint_id == cls.id))
.where(one_sp_scan.c.Max_ScannedLocation_id << cellids)
.dicts())
in_hex = []
for spawn in list(query):
in_hex.append(spawn)
return in_hex
@classmethod
def select_in_hex_by_location(cls, center, steps):
R = 6378.1 # KM radius of the earth
hdist = ((steps * 120.0) - 50.0) / 1000.0
n, e, s, w = hex_bounds(center, steps)
# Get all spawns in that box.
sp = list(cls
.select()
.where((cls.latitude <= n) &
(cls.latitude >= s) &
(cls.longitude >= w) &
(cls.longitude <= e))
.dicts())
# For each spawn work out if it is in the hex (clipping the diagonals).
in_hex = []
for spawn in sp:
# Get the offset from the center of each spawn in km.
offset = [math.radians(spawn['latitude'] - center[0]) * R,
math.radians(spawn['longitude'] - center[1]) *
(R * math.cos(math.radians(center[0])))]
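# offset[0] is the north-south distance and offset[1] the east-west
# distance in km; the four checks below clip the bounding box's corners
# down to the hex.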
# Check against the 4 lines that make up the diagonals.
if (offset[1] + (offset[0] * 0.5)) > hdist: # Too far NE
continue
if (offset[1] - (offset[0] * 0.5)) > hdist: # Too far SE
continue
if ((offset[0] * 0.5) - offset[1]) > hdist: # Too far NW
continue
if ((0 - offset[1]) - (offset[0] * 0.5)) > hdist: # Too far SW
continue
# If it gets to here it's a good spawn.
in_hex.append(spawn)
return in_hex
class ScanSpawnPoint(BaseModel):
scannedlocation = ForeignKeyField(ScannedLocation, null=True)
spawnpoint = ForeignKeyField(SpawnPoint, null=True)
class Meta:
primary_key = CompositeKey('spawnpoint', 'scannedlocation')
class SpawnpointDetectionData(BaseModel):
id = CharField(primary_key=True, max_length=54)
# Removed ForeignKeyField since it caused MySQL issues.
encounter_id = CharField(max_length=54)
# Removed ForeignKeyField since it caused MySQL issues.
spawnpoint_id = CharField(max_length=54, index=True)
scan_time = DateTimeField()
tth_secs = SmallIntegerField(null=True)
@staticmethod
def set_default_earliest_unseen(sp):
sp['earliest_unseen'] = (sp['latest_seen'] + 15 * 60) % 3600
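# E.g. (illustrative) latest_seen = 3500 gives
# earliest_unseen = (3500 + 900) % 3600 = 800, i.e. one quartile later,
# wrapping the hour.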
@classmethod
def classify(cls, sp, scan_loc, now_secs, sighting=None):
# Get past sightings.
query = list(cls.select()
.where(cls.spawnpoint_id == sp['id'])
.order_by(cls.scan_time.asc())
.dicts())
if sighting:
query.append(sighting)
tth_found = False
for s in query:
if s['tth_secs'] is not None:
tth_found = True
tth_secs = (s['tth_secs'] - 1) % 3600
# To reduce CPU usage, give an initial reading of 15 minute spawns if
# not done with initial scan of location.
if not scan_loc['done']:
# We only want to reset a SP if it is new and not due to the
# location changing (which creates new ScannedLocations).
if not tth_found:
sp['kind'] = 'hhhs'
if not sp['earliest_unseen']:
sp['latest_seen'] = now_secs
cls.set_default_earliest_unseen(sp)
elif clock_between(sp['latest_seen'], now_secs,
sp['earliest_unseen']):
sp['latest_seen'] = now_secs
return
# Make a record of links, so we can reset earliest_unseen
# if it changes.
old_kind = str(sp['kind'])
# Make a sorted list of the seconds after the hour.
seen_secs = sorted(map(lambda x: date_secs(x['scan_time']), query))
# Include an entry for the TTH if it was found.
if tth_found:
seen_secs.append(tth_secs)
seen_secs.sort()
# Add the first seen_secs to the end as a clock wrap around.
if seen_secs:
seen_secs.append(seen_secs[0] + 3600)
# Make a list of gaps between sightings.
gap_list = [seen_secs[i + 1] - seen_secs[i]
for i in range(len(seen_secs) - 1)]
max_gap = max(gap_list)
# An hour minus the largest gap in minutes gives us the duration the
# spawn was there. Round up to the nearest 15 minute interval for our
# current best guess duration.
duration = (int((60 - max_gap / 60.0) / 15) + 1) * 15
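# Illustrative numbers: a largest gap of 2700 seconds (45 minutes)
# gives duration = (int((60 - 45) / 15) + 1) * 15 = 30 minutes.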
# If the second largest gap is larger than 15 minutes, then there are
# two gaps greater than 15 minutes. It must be a double spawn.
if len(gap_list) > 4 and sorted(gap_list)[-2] > 900:
sp['kind'] = 'hshs'
sp['links'] = 'h?h?'
else:
# Convert the duration into a 'hhhs', 'hhss', 'hsss', 'ssss' string
# accordingly. 's' is for seen, 'h' is for hidden.
sp['kind'] = ''.join(
['s' if i > (3 - duration / 15) else 'h' for i in range(0, 4)])
# Assume no hidden times.
sp['links'] = sp['kind'].replace('s', '?')
if sp['kind'] != 'ssss':
if (not sp['earliest_unseen'] or
sp['earliest_unseen'] != sp['latest_seen'] or
not tth_found):
# New latest_seen will be just before max_gap.
sp['latest_seen'] = seen_secs[gap_list.index(max_gap)]
# if we don't have a earliest_unseen yet or if the kind of
# spawn has changed, reset to latest_seen + 14 minutes.
if not sp['earliest_unseen'] or sp['kind'] != old_kind:
cls.set_default_earliest_unseen(sp)
return
# Only ssss spawns from here below.
sp['links'] = '+++-'
if sp['earliest_unseen'] == sp['latest_seen']:
return
# Make a sight_list of dicts:
# {date: first seen time,
# delta: duration of sighting,
# same: whether encounter ID was same or different over that time}
#
# For 60 minute spawns ('ssss'), the largest gap doesn't give the
# earliest spawnpoint because a Pokemon is always there. Use the union
# of all intervals where the same encounter ID was seen to find the
# latest_seen. If a different encounter ID was seen, then the
# complement of that interval was the same ID, so union that
# complement as well.
sight_list = [{'date': query[i]['scan_time'],
'delta': query[i + 1]['scan_time'] -
query[i]['scan_time'],
'same': query[i + 1]['encounter_id'] ==
query[i]['encounter_id']
}
for i in range(len(query) - 1)
if query[i + 1]['scan_time'] - query[i]['scan_time'] <
timedelta(hours=1)
]
start_end_list = []
for s in sight_list:
if s['same']:
# Get the seconds past the hour for start and end times.
start = date_secs(s['date'])
end = (start + int(s['delta'].total_seconds())) % 3600
else:
# Convert diff range to same range by taking the clock
# complement.
start = date_secs(s['date'] + s['delta']) % 3600
end = date_secs(s['date'])
start_end_list.append([start, end])
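# Illustrative example: a sighting at 600 seconds past the hour followed
# 900 seconds later by a *different* encounter id contributes the
# complementary range [1500, 600] (wrapping the hour) to the same-id
# union below.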
# Take the union of all the ranges.
while True:
# union is list of unions of ranges with the same encounter id.
union = []
for start, end in start_end_list:
if not union:
union.append([start, end])
continue
# Cycle through all ranges in union, since it might overlap
# with any of them.
for u in union:
if clock_between(u[0], start, u[1]):
u[1] = end if not(clock_between(
u[0], end, u[1])) else u[1]
elif clock_between(u[0], end, u[1]):
u[0] = start if not(clock_between(
u[0], start, u[1])) else u[0]
elif union.count([start, end]) == 0:
union.append([start, end])
# Are no more unions possible?
if union == start_end_list:
break
start_end_list = union # Make another pass looking for unions.
# If more than one disparate union, take the largest as our starting
# point.
union = reduce(lambda x, y: x if (x[1] - x[0]) % 3600 >
(y[1] - y[0]) % 3600 else y, union, [0, 3600])
sp['latest_seen'] = union[1]
sp['earliest_unseen'] = union[0]
log.info('1x60: appear %d, despawn %d, duration: %d min.',
union[0], union[1], ((union[1] - union[0]) % 3600) / 60)
# Expand the seen times for 30 minute spawnpoints based on scans when spawn
# wasn't there. Return true if spawnpoint dict changed.
@classmethod
def unseen(cls, sp, now_secs):
# Return if we already have a tth.
if sp['latest_seen'] == sp['earliest_unseen']:
return False
# If now_secs is not between latest_seen and earliest_unseen, the
# miss tells us nothing new; bail out.
if not clock_between(sp['latest_seen'], now_secs,
sp['earliest_unseen']):
return False
sp['earliest_unseen'] = now_secs
return True
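# E.g. (illustrative) latest_seen = 1200, earliest_unseen = 2100 and a
# miss at now_secs = 1500 narrows earliest_unseen down to 1500.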
# Expand a 30 minute spawn with a new seen point based on which endpoint it
# is closer to. Return true if sp changed.
@classmethod
def clock_extend(cls, sp, new_secs):
# Check if this is a new earliest time.
if clock_between(sp['earliest_seen'], new_secs, sp['latest_seen']):
return False
# Extend earliest or latest seen depending on which is closer to the
# new point.
if (secs_between(new_secs, sp['earliest_seen']) <
secs_between(new_secs, sp['latest_seen'])):
sp['earliest_seen'] = new_secs
else:
sp['latest_seen'] = new_secs
return True
class Versions(flaskDb.Model):
key = CharField()
val = SmallIntegerField()
class Meta:
primary_key = False
class GymMember(BaseModel):
gym_id = CharField(index=True)
pokemon_uid = CharField(index=True)
last_scanned = DateTimeField(default=datetime.utcnow, index=True)
class Meta:
primary_key = False
class GymPokemon(BaseModel):
pokemon_uid = CharField(primary_key=True, max_length=50)
pokemon_id = SmallIntegerField()
cp = SmallIntegerField()
trainer_name = CharField(index=True)
num_upgrades = SmallIntegerField(null=True)
move_1 = SmallIntegerField(null=True)
move_2 = SmallIntegerField(null=True)
height = FloatField(null=True)
weight = FloatField(null=True)
stamina = SmallIntegerField(null=True)
stamina_max = SmallIntegerField(null=True)
cp_multiplier = FloatField(null=True)
additional_cp_multiplier = FloatField(null=True)
iv_defense = SmallIntegerField(null=True)
iv_stamina = SmallIntegerField(null=True)
iv_attack = SmallIntegerField(null=True)
last_seen = DateTimeField(default=datetime.utcnow)
class Trainer(BaseModel):
name = CharField(primary_key=True, max_length=50)
team = SmallIntegerField()
level = SmallIntegerField()
last_seen = DateTimeField(default=datetime.utcnow)
class GymDetails(BaseModel):
gym_id = CharField(primary_key=True, max_length=50)
name = CharField()
description = TextField(null=True, default="")
url = CharField()
last_scanned = DateTimeField(default=datetime.utcnow)
class Token(flaskDb.Model):
token = TextField()
last_updated = DateTimeField(default=datetime.utcnow, index=True)
@staticmethod
def get_valid(limit=15):
# Make sure we don't grab more than we can process
if limit > 15:
limit = 15
valid_time = datetime.utcnow() - timedelta(seconds=30)
token_ids = []
tokens = []
try:
with flaskDb.database.transaction():
query = (Token
.select()
.where(Token.last_updated > valid_time)
.order_by(Token.last_updated.asc())
.limit(limit))
for t in query:
token_ids.append(t.id)
tokens.append(t.token)
if tokens:
log.debug('Retrieved Token IDs: {}'.format(token_ids))
result = DeleteQuery(Token).where(
Token.id << token_ids).execute()
log.debug('Deleted {} tokens.'.format(result))
except OperationalError as e:
log.error('Failed captcha token transactional query: {}'.format(e))
return tokens
def hex_bounds(center, steps=None, radius=None):
# Make a box that is (70m * step_limit * 2) + 70m away from the
# center point. Rationale is that the box must reach the farthest
# scan location plus its roughly 70m scan radius.
sp_dist = 0.07 * (2 * steps + 1) if steps else radius
n = get_new_coords(center, sp_dist, 0)[0]
e = get_new_coords(center, sp_dist, 90)[1]
s = get_new_coords(center, sp_dist, 180)[0]
w = get_new_coords(center, sp_dist, 270)[1]
return (n, e, s, w)
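# E.g. (illustrative) steps = 5 gives sp_dist = 0.07 * 11 = 0.77 km from
# the center to each edge of the box.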
def geofence(step_location, geofence_file, forbidden=False):
geofence = []
with open(geofence_file) as f:
for line in f:
if len(line.strip()) == 0 or line.startswith('#'):
continue
geofence.append(literal_eval(line.strip()))
# if forbidden:
# log.info('Loaded %d geofence-forbidden coordinates. ' +
# 'Applying...', len(geofence))
# else:
# log.info('Loaded %d geofence coordinates. Applying...',
# len(geofence))
# log.info(geofence)
p = Path(geofence)
step_location_geofenced = []
result_x, result_y, result_z = step_location
if p.contains_point([step_location[0], step_location[1]]) ^ forbidden:
step_location_geofenced.append((result_x, result_y, result_z))
# log.warning('FOUND IN THE GEOFENCE, LURING: %s, %s', result_x, result_y)
return step_location_geofenced
def check_login(args, account, api, position, proxy_url):
# Logged in? Enough time left? Cool!
if api._auth_provider and api._auth_provider._ticket_expire:
remaining_time = api._auth_provider._ticket_expire / 1000 - time.time()
if remaining_time > 60:
log.debug(
'Credentials remain valid for another %f seconds.',
remaining_time)
return
# Try to login. Repeat a few times, but don't get stuck here.
i = 0
while i < args.login_retries:
try:
if proxy_url:
api.set_authentication(
provider=account['auth_service'],
username=account['username'],
password=account['password'],
proxy_config={'http': proxy_url, 'https': proxy_url})
else:
api.set_authentication(
provider=account['auth_service'],
username=account['username'],
password=account['password'])
break
except:
if i >= args.login_retries:
raise TooManyLoginAttempts('Exceeded login attempts.')
else:
i += 1
log.error(
'Failed to login to Pokemon Go with account %s. ' +
'Trying again in %g seconds.',
account['username'], args.login_delay)
time.sleep(args.login_delay)
log.debug('Login for account %s successful.', account['username'])
time.sleep(args.scan_delay)
def get_encounter_details(p, step_location, key_scheduler, encountering_lvl):
# print(p)
# print(step_location)
# print(key_scheduler)
# print(encountering_lvl)
cp = None
time.sleep(args.encounter_delay)
device_info = generate_device_info()
api = PGoApi(device_info=device_info)
if args.proxy is not None and len(args.proxy) > 0:
api.set_proxy({'http': args.proxy[0], 'https': args.proxy[0]})
key = key_scheduler.next()
api.activate_hash_server(key)
api.set_position(*step_location)
proxyurl = False
if args.proxy is not None and len(args.proxy) > 0:
proxyurl = args.proxy[0]
if encountering_lvl == 30:
account = cp_account_queue.get()
elif encountering_lvl == 25:
account = iv_account_queue.get()
# print(account)
log.info(
'Requesting encounter details for Pokemon (ID #%s) using account %s',
str(p['pokemon_data']['pokemon_id']), account['username'])
# Think about what happens when login fails.
# Right now its silently discarded from the queue
check_login(args, account, api, step_location, proxyurl)
try:
req = api.create_request()
encounter_result = req.encounter(
encounter_id=p['encounter_id'],
spawn_point_id=p['spawn_point_id'],
player_latitude=step_location[0],
player_longitude=step_location[1])
encounter_result = req.check_challenge()
encounter_result = req.get_hatched_eggs()
encounter_result = req.get_inventory()
encounter_result = req.check_awarded_badges()
encounter_result = req.download_settings()
encounter_result = req.get_buddy_walked()
encounter_result = req.call()
if encounter_result is not None:
captcha_url = encounter_result['responses']['CHECK_CHALLENGE'][
'challenge_url'] # Check for captcha
if len(captcha_url) > 1:  # Throw warning but finish parsing
log.warning('Account %s encountered a reCaptcha.', account['username'])
elif 'wild_pokemon' in encounter_result['responses']['ENCOUNTER']:
pokemon_info = encounter_result[
'responses']['ENCOUNTER']['wild_pokemon']['pokemon_data']
log.info(
'Encountered Pokemon #%s for details.',
p['pokemon_data']['pokemon_id'])
else:
log.warning(
'Pokemon #%s could not be encountered for details.',
p['pokemon_data']['pokemon_id'])
except Exception as e:
log.error('Unexpected error trying to get CP: %s', repr(e))
if encountering_lvl == 30:
cp_account_queue.put(account)
elif encountering_lvl == 25:
iv_account_queue.put(account)
return pokemon_info
# todo: this probably shouldn't _really_ be in "models" anymore, but w/e.
def parse_map(args, map_dict, step_location, db_update_queue, wh_update_queue,
api, now_date, account, key_scheduler):
pokemon = {}
pokestops = {}
gyms = {}
skipped = 0
stopsskipped = 0
alreadyLeveled = False
level = 0
USELESS = [101, 102, 103, 104, 201, 202, 602, 603, 604, 701, 702, 703, 704, 705]
dittomons = [16, 19, 41, 129, 161, 163, 193]
forbidden = False
forts = []
forts_count = 0
wild_pokemon = []
wild_pokemon_count = 0
nearby_pokemon = 0
spawn_points = {}
scan_spawn_points = {}
sightings = {}
new_spawn_points = []
sp_id_list = []
captcha_url = ''
# Consolidate the individual lists in each cell into two lists of Pokemon
# and a list of forts.
cells = map_dict['responses']['GET_MAP_OBJECTS']['map_cells']
for i, cell in enumerate(cells):
# If we have map responses then use the time from the request
if i == 0:
now_date = datetime.utcfromtimestamp(
cell['current_timestamp_ms'] / 1000)
nearby_pokemon += len(cell.get('nearby_pokemons', []))
# Parse everything for stats (counts). Future enhancement -- we don't
# necessarily need to know *how many* forts/wild/nearby were found but
# we'd like to know whether or not *any* were found to help determine
# if a scan was actually bad.
if config['parse_pokemon']:
wild_pokemon += cell.get('wild_pokemons', [])
if config['parse_pokestops'] or config['parse_gyms']:
forts += cell.get('forts', [])
# Update count regardless of Pokémon parsing or not, we need the count.
# Length is O(1).
wild_pokemon_count += len(cell.get('wild_pokemons', []))
forts_count += len(cell.get('forts', []))
now_secs = date_secs(now_date)
if wild_pokemon:
wild_pokemon_count = len(wild_pokemon)
if forts:
forts_count = len(forts)
totalDisks = 0
for items in map_dict['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']:
inventory_item_data = items['inventory_item_data']
if 'player_stats' in inventory_item_data:
level = inventory_item_data['player_stats']['level']
currentExp = inventory_item_data['player_stats']['experience']
nextLevel = inventory_item_data['player_stats']['next_level_xp']
if 'item' in inventory_item_data and inventory_item_data['item']['item_id'] == 501:
totalDisks = inventory_item_data['item'].get('count', 0)
log.debug('@@@LURE@@@ FOUND LURES: %s IN TOTAL', totalDisks)
del map_dict['responses']['GET_INVENTORY']
now_secs = date_secs(now_date)
# If there are no wild or nearby Pokemon . . .
if not wild_pokemon and not nearby_pokemon:
# . . . and there are no gyms/pokestops then it's unusable/bad.
if not forts:
log.warning('Bad scan. Parsing found absolutely nothing.')
log.info('Common causes: captchas or IP bans.')
else:
# No wild or nearby Pokemon but there are forts. It's probably
# a speed violation.
log.warning('No nearby or wild Pokemon but there are visible gyms '
'or pokestops. Possible speed violation.')
scan_loc = ScannedLocation.get_by_loc(step_location)
done_already = scan_loc['done']
ScannedLocation.update_band(scan_loc, now_date)
just_completed = not done_already and scan_loc['done']
if wild_pokemon and config['parse_pokemon']:
encounter_ids = [b64encode(str(p['encounter_id']))
for p in wild_pokemon]
# For all the wild Pokemon we found check if an active Pokemon is in
# the database.
query = (Pokemon
.select(Pokemon.encounter_id, Pokemon.spawnpoint_id)
.where((Pokemon.disappear_time >= now_date) &
(Pokemon.encounter_id << encounter_ids))
.dicts())
# Store all encounter_ids and spawnpoint_ids for the Pokemon in query.
# All of that is needed to make sure it's unique.
encountered_pokemon = [
(p['encounter_id'], p['spawnpoint_id']) for p in query]
for p in wild_pokemon:
sp = SpawnPoint.get_by_id(p['spawn_point_id'], p[
'latitude'], p['longitude'])
spawn_points[p['spawn_point_id']] = sp
sp['missed_count'] = 0
sighting = {
'id': b64encode(str(p['encounter_id'])) + '_' + str(now_secs),
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'scan_time': now_date,
'tth_secs': None
}
# Keep a list of sp_ids to return.
sp_id_list.append(p['spawn_point_id'])
# time_till_hidden_ms was overflowing causing a negative integer.
# It was also returning a value above 3.6M ms.
if 0 < p['time_till_hidden_ms'] < 3600000:
d_t_secs = date_secs(datetime.utcfromtimestamp(
(p['last_modified_timestamp_ms'] +
p['time_till_hidden_ms']) / 1000.0))
if (sp['latest_seen'] != sp['earliest_unseen'] or
not sp['last_scanned']):
log.info('TTH found for spawnpoint %s.', sp['id'])
sighting['tth_secs'] = d_t_secs
# Only update when TTH is seen for the first time.
# Just before Pokemon migrations, Niantic sets all TTH
# to the exact time of the migration, not the normal
# despawn time.
sp['latest_seen'] = d_t_secs
sp['earliest_unseen'] = d_t_secs
scan_spawn_points[scan_loc['cellid'] + sp['id']] = {
'spawnpoint': sp['id'],
'scannedlocation': scan_loc['cellid']}
if not sp['last_scanned']:
log.info('New Spawn Point found.')
new_spawn_points.append(sp)
# If we found a new spawnpoint after the location was already
# fully scanned then either it's new, or we had a bad scan.
# Either way, rescan the location.
if scan_loc['done'] and not just_completed:
log.warning('Location was fully scanned, and yet a brand '
'new spawnpoint found.')
log.warning('Redoing scan of this location to identify '
'new spawnpoint.')
ScannedLocation.reset_bands(scan_loc)
if (not SpawnPoint.tth_found(sp) or sighting['tth_secs'] or
not scan_loc['done'] or just_completed):
SpawnpointDetectionData.classify(sp, scan_loc, now_secs,
sighting)
sightings[p['encounter_id']] = sighting
sp['last_scanned'] = datetime.utcfromtimestamp(
p['last_modified_timestamp_ms'] / 1000.0)
if ((b64encode(str(p['encounter_id'])), p['spawn_point_id'])
in encountered_pokemon):
# If Pokemon has been encountered before don't process it.
skipped += 1
continue
start_end = SpawnPoint.start_end(sp, 1)
seconds_until_despawn = (start_end[1] - now_secs) % 3600
disappear_time = now_date + \
timedelta(seconds=seconds_until_despawn)
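# Illustrative numbers: start_end[1] = 1200 and now_secs = 3000 gives
# seconds_until_despawn = (1200 - 3000) % 3600 = 1800, i.e. the Pokemon
# despawns 30 minutes from now.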
pid = p['pokemon_data']['pokemon_id']
printPokemon(pid, p[
'latitude'], p['longitude'], disappear_time)
# Scan for IVs and moves.
encounter_result = None
if (args.encounter and (p['pokemon_data']['pokemon_id']
in args.encounter_whitelist or
p['pokemon_data']['pokemon_id']
not in args.encounter_blacklist and
not args.encounter_whitelist)):
time.sleep(args.encounter_delay)
# Setup encounter request envelope.
req = api.create_request()
encounter_result = req.encounter(
encounter_id=p['encounter_id'],
spawn_point_id=p['spawn_point_id'],
player_latitude=step_location[0],
player_longitude=step_location[1])
req.check_challenge()
req.get_hatched_eggs()
req.get_inventory()
req.check_awarded_badges()
req.download_settings()
req.get_buddy_walked()
encounter_result = req.call()
captcha_url = encounter_result['responses']['CHECK_CHALLENGE'][
'challenge_url'] # Check for captcha
if len(captcha_url) > 1: # Throw warning but finish parsing
log.warning('Account encountered a reCaptcha.')
previous_id = p['pokemon_data']['pokemon_id']
pokemon[p['encounter_id']] = {
'encounter_id': b64encode(str(p['encounter_id'])),
'spawnpoint_id': p['spawn_point_id'],
'pokemon_id': p['pokemon_data']['pokemon_id'],
'latitude': p['latitude'],
'longitude': p['longitude'],
'disappear_time': disappear_time,
'individual_attack': None,
'individual_defense': None,
'individual_stamina': None,
'move_1': None,
'move_2': None,
'height': None,
'weight': None,
'gender': None,
'form': None,
'cp': None,
'cp_multiplier': None,
}
if (encounter_result is not None and 'wild_pokemon'
in encounter_result['responses']['ENCOUNTER']):
pokemon_info = encounter_result['responses'][
'ENCOUNTER']['wild_pokemon']['pokemon_data']
pokemon[p['encounter_id']].update({
'gender': pokemon_info['pokemon_display']['gender'],
})
# Check for Unown's alphabetic character
if pokemon_info['pokemon_id'] == 201:
pokemon[p['encounter_id']]['form'] = pokemon_info[
'pokemon_display'].get('form', None)
if int(level) > 24:
pokemon[p['encounter_id']].update({
'individual_attack': pokemon_info.get(
'individual_attack', 0),
'individual_defense': pokemon_info.get(
'individual_defense', 0),
'individual_stamina': pokemon_info.get(
'individual_stamina', 0),
'move_1': pokemon_info['move_1'],
'move_2': pokemon_info['move_2'],
'height': pokemon_info['height_m'],
'weight': pokemon_info['weight_kg'],
})
if int(level) > 29:
pokemon[p['encounter_id']].update({
'individual_attack': pokemon_info.get(
'individual_attack', 0),
'individual_defense': pokemon_info.get(
'individual_defense', 0),
'individual_stamina': pokemon_info.get(
'individual_stamina', 0),
'move_1': pokemon_info['move_1'],
'move_2': pokemon_info['move_2'],
'height': pokemon_info['height_m'],
'weight': pokemon_info['weight_kg'],
'cp': pokemon_info.get('cp', None),
'cp_multiplier': pokemon_info.get('cp_multiplier', None),
})
pokeball_count = 0
greatball_count = 0
ultraball_count = 0
inventory = encounter_result['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
for item in inventory:
inventory_item_data = item['inventory_item_data']
if not inventory_item_data:
continue
if 'item' in inventory_item_data and inventory_item_data['item']['item_id'] == 1:
pokeball_count = inventory_item_data['item'].get('count', 0)
# log.info('@@@INVENTORY@@@ there are %s pokeballs', pokeball_count)
elif 'item' in inventory_item_data and inventory_item_data['item']['item_id'] == 2:
greatball_count = inventory_item_data['item'].get('count', 0)
# log.info('@@@INVENTORY@@@ there are %s greatballs', greatball_count)
elif 'item' in inventory_item_data and inventory_item_data['item']['item_id'] == 3:
ultraball_count = inventory_item_data['item'].get('count', 0)
# log.info('@@@INVENTORY@@@ there are %s ultraballs', ultraball_count)
catch_pid = None
if ultraball_count == 0 and greatball_count == 0 and pokeball_count == 0:
log.info('***CATCHING DUDES***No balls! Not gonna try and catch')
catch_pid = 'blueballs'
# Now catch it if it's one of the possible Ditto disguises listed in
# dittomons above.
if args.ditto is True and int(level) < int(args.level_cap):
if p['pokemon_data']['pokemon_id'] in dittomons:
log.info('***CATCHING DUDES***Ditto pokemon found, catching - EncID:%s', b64encode(str(p['encounter_id'])))
if pokeball_count != 0:
current_ball = 1
else:
if greatball_count != 0:
current_ball = 2
else:
current_ball = 3
while catch_pid is None:
time.sleep(2.10)
random_throw = 1.5 + 0.25 * random()
random_spin = 0.8 + 0.1 * random()
req = api.create_request()
catch_result = req.check_challenge()
catch_result = req.get_hatched_eggs()
catch_result = req.get_inventory()
catch_result = req.check_awarded_badges()
catch_result = req.download_settings()
catch_result = req.get_buddy_walked()
catch_result = req.catch_pokemon(encounter_id=p['encounter_id'],
pokeball=current_ball,
normalized_reticle_size=random_throw,
spawn_point_id=p['spawn_point_id'],
hit_pokemon=1,
spin_modifier=random_spin,
normalized_hit_position=1.0)
catch_result = req.call()
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = catch_result['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('fuck, captcha\'d, **DURING CATCHING** now return Bad Scan so that this can be re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
}
try:
catch_result['responses']['CATCH_POKEMON']['status']
except Exception as e:
log.warning('***CATCHING DUDES***Catch request failed: %s', e)
catch_result = False
if not catch_result:
log.info('***CATCHING DUDES***Catch request failed!! Waiting 10 then trying to catch again')
catch_response = 2
time.sleep(10)
else:
# log.warning('***CATCHING DUDES*** IMPORDANT %s', catch_result['responses']['CATCH_POKEMON']['status'])
catch_response = catch_result['responses']['CATCH_POKEMON']['status']
if catch_response == 1:
log.info('***CATCHING DUDES***Catch SUCC-cess')
awardedExp = 0
for number in catch_result['responses']['CATCH_POKEMON']['capture_award']['xp']:
awardedExp = awardedExp + number
log.warning('$$$PLAYERSTATS$$$ xp is : %s', awardedExp)
oldExp = currentExp
currentExp = currentExp + awardedExp
log.warning('$$$PLAYERSTATS$$$ Caught pokemon so increased XP by %s, old XP was, %s now is %s, next level at %s', awardedExp, oldExp, currentExp, nextLevel)
if currentExp >= nextLevel:
log.warning('$$$PLAYERSTATS$$$ LEVEL UP DETECTED OH SHIT')
alreadyLeveled = True
levelup = level + 1
levelStatus = None
while levelStatus is None:
req = api.create_request()
levelResponse = req.level_up_rewards(level=levelup)
time.sleep(1)
levelResponse = req.call()
# log.warning('$$$LEVELUP$$$ %s', levelResponse['responses'])
levelStatus = levelResponse['responses']['LEVEL_UP_REWARDS']['result']
if levelStatus == 0:
log.warning('$$$PLAYERSTATS$$$ SHIT IT\'S UNSET WHAT DOES THAT MEAN')
elif levelStatus == 1:
log.warning('$$$PLAYERSTATS$$$ Level up rewards SUCC CESS')
elif levelStatus == 2:
log.warning('$$$PLAYERSTATS$$$ Level up reward ALREADY TAKEN the code is BROKE')
else:
log.warning('$$$PLAYERSTATS$$$ UNKNOWN, SHIT IS BLANK')
catch_pid = catch_result['responses']['CATCH_POKEMON']['captured_pokemon_id']
# log.info('***CATCHING DUDES***PID:%s', catch_pid)
elif catch_response == 3:
catch_pid = 'ran'
log.info('***CATCHING DUDES***Pokemon ran!')
elif catch_response == 2:
if current_ball == 1:
pokeball_count = pokeball_count - 1
elif current_ball == 2:
greatball_count = greatball_count - 1
else:
ultraball_count = ultraball_count - 1
log.info('***CATCHING DUDES***Catch failed, balling up if possible - no razz tho')
if ultraball_count > 0:
current_ball = 3
elif ultraball_count == 0 and greatball_count > 0:
current_ball = 2
elif ultraball_count == 0 and greatball_count == 0 and pokeball_count > 0:
current_ball = 1
else:
log.info('***CATCHING DUDES***Out of pokeballs!')
catch_pid = 'blueballs'
else:
continue
if catch_pid == 'ran':
log.info('***CATCHING DUDES***Since he gone, gonna requeue (lol jk im not that good yet)')
elif catch_pid == 'blueballs':
log.info('***CATCHING DUDES***Get more pokeballs! Queued up (lol jk im jk lol)')
else:
# check inventory again and see if ditto - wait a bit first to avoid throttling
time.sleep(10) # lol
req = api.create_request()
new_inv_get = req.check_challenge()
new_inv_get = req.get_hatched_eggs()
new_inv_get = req.get_inventory()
new_inv_get = req.check_awarded_badges()
new_inv_get = req.download_settings()
new_inv_get = req.get_buddy_walked()
new_inv_get = req.call()
# https://github.com/norecha/PokeInventory/blob/master/inventory.py
inventory = new_inv_get['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = new_inv_get['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('fuck, captcha\'d, **DURING DITTO CHECK** now return Bad Scan so that this can be re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
}
for item in inventory:
inventory_item_data = item['inventory_item_data']
if not inventory_item_data:
continue
if 'pokemon_data' in inventory_item_data:
pokemonItem = inventory_item_data['pokemon_data']
# log.info('***CATCHING DUDES***dump-PID%s:%s, the pokemon[\'id\'] is %s', index, pokemon['pokemon_id'], pokemon['id'])
if 'is_egg' in pokemonItem and pokemonItem['is_egg']:
continue
if pokemonItem['id'] == catch_pid:
# this pokemonItem is the most recent caught - is it ditto-mon
if pokemonItem['pokemon_id'] == 132:
log.info('***CATCHING DUDES***DITTO FOUND OH SHIT')
pokemon[p['encounter_id']].update({
'pokemon_id': 132,
})
# keep it dittos are lit
break
else:
log.info('***CATCHING DUDES***It\'s not a ditto')
# destroy it
release_get_result = 0 # lol
while release_get_result != 1:
time.sleep(10) # lol
req = api.create_request()
release_get = req.check_challenge()
release_get = req.get_hatched_eggs()
release_get = req.get_inventory()
release_get = req.check_awarded_badges()
release_get = req.download_settings()
release_get = req.get_buddy_walked()
release_get = req.release_pokemon(pokemon_id=pokemonItem['id'])
release_get = req.call()
release_get_result = release_get['responses']['RELEASE_POKEMON']['result']
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = release_get['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('fuck, captcha\'d, **DURING DITTO CHECK** now return Bad Scan so that this can be re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
}
if release_get_result == 1:
log.info('***CATCHING DUDES***Non-ditto disposed')
break
else:
log.info('***CATCHING DUDES***Non-ditto disposing failed - trying again in 10 sec')
encounter_result = clear_dict_response(encounter_result)
ivPercentage = 100
if p['pokemon_data']['pokemon_id'] in args.iv_whitelist and int(level) < 25 and len(args.iv_accountcsv) > 1:
# Provide IVs, movesets, height, weight, gender of lvl 25+
pokemon_info = get_encounter_details(
p, step_location, key_scheduler, 25)
iva = int(pokemon_info.get('individual_attack', 0))
ivb = int(pokemon_info.get('individual_defense', 0))
ivc = int(pokemon_info.get('individual_stamina', 0))
ivPercentage = ((iva + ivb + ivc) / float(45)) * 100
# print(ivPercentage)
pokemon[p['encounter_id']].update({
'individual_attack': pokemon_info.get(
'individual_attack', None),
'individual_defense': pokemon_info.get(
'individual_defense', None),
'individual_stamina': pokemon_info.get(
'individual_stamina', None),
'move_1': pokemon_info.get('move_1', None),
'move_2': pokemon_info.get('move_2', None),
'height': pokemon_info.get('height_m', None),
'weight': pokemon_info.get('weight_kg', None),
'gender': pokemon_info['pokemon_display'].get(
'gender', None)
})
# Retrieve high level encounter details if requested
if p['pokemon_data']['pokemon_id'] in args.cp_whitelist and int(level) < 30 and int(ivPercentage) > int(args.minimum_cp_ivs) and len(args.cp_accountcsv) > 1:
# Add lvl 30+ CP to the general encounter details of lvl 25+
pokemon_info = get_encounter_details(
p, step_location, key_scheduler, 30)
pokemon[p['encounter_id']].update({
'individual_attack': pokemon_info.get(
'individual_attack', None),
'individual_defense': pokemon_info.get(
'individual_defense', None),
'individual_stamina': pokemon_info.get(
'individual_stamina', None),
'move_1': pokemon_info.get('move_1', None),
'move_2': pokemon_info.get('move_2', None),
'height': pokemon_info.get('height_m', None),
'weight': pokemon_info.get('weight_kg', None),
'gender': pokemon_info['pokemon_display'].get(
'gender', None),
'cp': pokemon_info.get('cp', None),
'cp_multiplier': pokemon_info.get('cp_multiplier', None),
})
# Check for Unown's alphabetic character.
if pokemon_info['pokemon_id'] == 201:
pokemon[p['encounter_id']]['form'] = pokemon_info[
'pokemon_display'].get('form', None)
if args.webhooks:
pokemon_id = p['pokemon_data']['pokemon_id']
if (pokemon_id in args.webhook_whitelist or
(not args.webhook_whitelist and pokemon_id
not in args.webhook_blacklist)):
wh_poke = pokemon[p['encounter_id']].copy()
wh_poke.update({
'disappear_time': calendar.timegm(
disappear_time.timetuple()),
'last_modified_time': p['last_modified_timestamp_ms'],
'time_until_hidden_ms': p['time_till_hidden_ms'],
'verified': SpawnPoint.tth_found(sp),
'seconds_until_despawn': seconds_until_despawn,
'spawn_start': start_end[0],
'spawn_end': start_end[1],
'previous_id': previous_id,
'player_level': level
})
wh_update_queue.put(('pokemon', wh_poke))
if forts and (config['parse_pokestops'] or config['parse_gyms']):
if config['parse_pokestops']:
stop_ids = [f['id'] for f in forts if f.get('type') == 1]
if stop_ids:
query = (Pokestop
.select(Pokestop.pokestop_id, Pokestop.last_modified)
.where((Pokestop.pokestop_id << stop_ids))
.dicts())
encountered_pokestops = [(f['pokestop_id'], int(
(f['last_modified'] -
datetime(1970, 1, 1)).total_seconds())) for f in query]
# Complete tutorial with a Pokestop spin
if args.complete_tutorial and not (len(captcha_url) > 1) and args.pokestop_spinning is False and int(level) < int(args.level_cap):
if config['parse_pokestops']:
tutorial_pokestop_spin(
api, level, forts, step_location, account)
else:
log.error(
'Pokestop can not be spun since parsing Pokestops is ' +
'not active. Check if \'-nk\' flag is accidentally set.')
for f in forts:
if config['parse_pokestops'] and f.get('type') == 1: # Pokestops.
distance = 0.04
egg = None
bater = None
breakableId = None
unbreakableId = None
monID = None
monCount = 0
usedIncubatorCount = 0
if in_radius((f['latitude'], f['longitude']), step_location, distance) and args.pokestop_spinning is True:
spin_result = None
req = api.create_request()
log.warning('Pokestop ID: %s', f['id'])
while spin_result is None:
spin_response = req.fort_search(fort_id=f['id'],
fort_latitude=f['latitude'],
fort_longitude=f['longitude'],
player_latitude=step_location[0],
player_longitude=step_location[1]
)
spin_response = req.check_challenge()
spin_response = req.get_hatched_eggs()
spin_response = req.get_inventory()
spin_response = req.check_awarded_badges()
spin_response = req.download_settings()
spin_response = req.get_buddy_walked()
time.sleep(10)
spin_response = req.call()
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = spin_response['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('fuck, captcha\'d, now return Bad Scan so that this can be re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
'captcha': True,
'failed': 'pokestop'
}
if spin_response['responses']['FORT_SEARCH']['result'] == 1:
log.info('&&&SPINNING&&&Spin stop attempt success')
spin_result = 1
awardedExp = spin_response['responses']['FORT_SEARCH']['experience_awarded']
log.warning('$$$PLAYERSTATS$$$ xp is : %s', awardedExp)
oldExp = currentExp
currentExp = currentExp + awardedExp
log.warning('$$$PLAYERSTATS$$$ Spun pokestop so increased XP by %s, old XP was, %s now is %s, next level at %s', awardedExp, oldExp, currentExp, nextLevel)
if currentExp >= nextLevel:
log.warning('$$$PLAYERSTATS$$$ LEVEL UP DETECTED OH SHIT')
levelup = level + 1
levelStatus = None
while levelStatus is None:
if alreadyLeveled is True:
log.warning('$$$PLAYERSTATS$$$ But actually we already leveled up fam. Nvm.')
break
req = api.create_request()
levelResponse = req.level_up_rewards(level=levelup)
time.sleep(1)
levelResponse = req.call()
if levelResponse['responses']['LEVEL_UP_REWARDS']['result']:
levelStatus = levelResponse['responses']['LEVEL_UP_REWARDS']['result']
if levelStatus == 0:
log.warning('$$$PLAYERSTATS$$$ SHIT IT\'S UNSET WHAT DOES THAT MEAN')
elif levelStatus == 1:
log.warning('$$$PLAYERSTATS$$$ Level up SUCC CESS - Now level %s', levelup)
elif levelStatus == 2:
log.warning('$$$PLAYERSTATS$$$ Level up reward ALREADY TAKEN the code is BROKE')
else:
log.warning('$$$PLAYERSTATS$$$ UNKNOWN, SHIT IS BLANK')
else:
log.warning('$$$PLAYERSTATS$$$ LEVEL UP FAILED! Level up has already been done')
levelStatus = 0
elif spin_response['responses']['FORT_SEARCH']['result'] == 2:
log.info('&&&SPINNING&&&Stop is out of range - this formula needs fixing')
spin_result = 'Failed'
elif spin_response['responses']['FORT_SEARCH']['result'] == 3:
log.info('&&&SPINNING&&&Already spun this stop - check for this one day')
spin_result = 'Failed'
elif spin_response['responses']['FORT_SEARCH']['result'] == 4:
log.info('&&&SPINNING&&&Inventory is full (idk how you managed this one)')
spin_result = 'Failed'
elif spin_response['responses']['FORT_SEARCH']['result'] == 5:
log.info('&&&SPINNING&&&Maximum spun stops for the day - idk how you managed this either')
spin_result = 'Failed'
else:
log.info('&&&SPINNING&&&No result set - weird error - abort mission')
spin_result = 'Failed'
inventory = spin_response['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
for item in inventory:
inventory_item_data = item['inventory_item_data']
if not inventory_item_data:
continue
if 'pokemon_data' in inventory_item_data:
pokememe = inventory_item_data['pokemon_data']
if 'is_egg' in pokememe and pokememe['is_egg'] and 'egg_incubator_id' not in pokememe:
log.warning('###EGGS### FOUND AN EGG GONNA BATE IT: %s', pokememe['id'])
egg = pokememe['id']
else:
log.warning('###EGGS### FOUND A MON WHAT EVER: %s', pokememe['id'])
monCount += 1
monID = pokememe['id']
if 'egg_incubators' in inventory_item_data:
# Walk the incubator list directly; used incubators already hold a
# pokemon_id, and item 901 is the unlimited-use incubator.
incubators = inventory_item_data['egg_incubators'].get('egg_incubator', [])
for incubator in incubators:
itemid = incubator['item_id']
log.warning('###EGGS### THE ITEM ID %s', itemid)
if 'pokemon_id' in incubator:
log.warning('###EGGS### FOUND A USED BATOR %s', itemid)
usedIncubatorCount += 1
else:
if itemid == 901:
unbreakableId = incubator['id']
log.warning('###EGGS### HAVE A BATOR GONNA BATE IT: %s', unbreakableId)
else:
breakableId = incubator['id']
log.warning('###EGGS### HAVE A breakorRRR GONNA breaGk IT: %s', breakableId)
if 'item' in inventory_item_data and inventory_item_data['item']['item_id'] == 1:
if inventory_item_data['item'].get('count', 0) > 200:
log.info('@@@INVENTORY@@@ we have 200+ balls! Trash 69 and move on')
trash_status = None
while trash_status is None:
req = api.create_request()
trash_result = req.check_challenge()
trash_result = req.get_hatched_eggs()
trash_result = req.get_inventory()
trash_result = req.check_awarded_badges()
trash_result = req.download_settings()
trash_result = req.get_buddy_walked()
trash_result = req.recycle_inventory_item(item_id=inventory_item_data['item']['item_id'],
count=69)
time.sleep(4.20)
trash_result = req.call()
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = trash_result['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('fuck, captcha\'d, **DURING ITEM TRASHING** now return Bad Scan so that this can be re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
'captcha': True,
'failed': 'trashing'
}
# log.info('@@@INVENTORY@@@ %s remaining of ID: %s', trash_result['responses']['RECYCLE_INVENTORY_ITEM']['new_count'], inventory_item_data['item']['item_id'])
if trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 1:
log.info('@@@INVENTORY@@@ recycle success')
trash_status = 1
elif trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 2:
log.info('@@@INVENTORY@@@ not enough items to trash - parsing messed up')
trash_status = 1
elif trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 3:
trash_status = 1
log.info('@@@INVENTORY@@@ tried to recycle incubator - parsing messed up again')
else:
log.warning('trashing failed - panic')
trash_status = 1
if 'item' in inventory_item_data and inventory_item_data['item']['item_id'] in USELESS:
if inventory_item_data['item'].get('count', 0) > 10:
log.info('@@@INVENTORY@@@ too many potions, recycling all but 7')
totalPotions = inventory_item_data['item'].get('count', 0)
trashingPotions = totalPotions - 7
trash_status = None
while trash_status is None:
req = api.create_request()
trash_result = req.check_challenge()
trash_result = req.get_hatched_eggs()
trash_result = req.get_inventory()
trash_result = req.check_awarded_badges()
trash_result = req.download_settings()
trash_result = req.get_buddy_walked()
trash_result = req.recycle_inventory_item(item_id=inventory_item_data['item']['item_id'],
count=trashingPotions)
time.sleep(4.20)
trash_result = req.call()
# REMEMBER TO CHECK FOR CAPTCHAS WITH EVERY REQUEST
captcha_url = trash_result['responses']['CHECK_CHALLENGE']['challenge_url']
if len(captcha_url) > 1:
log.info('Hit a captcha during item trashing - returning bad scan so this location gets re-scanned')
return {
'count': 0,
'gyms': gyms,
'spawn_points': spawn_points,
'bad_scan': True,
'captcha': True,
'failed': 'trashing'
}
# log.info('@@@INVENTORY@@@ %s remaining of ID: %s', trash_result['responses']['RECYCLE_INVENTORY_ITEM']['new_count'], inventory_item_data['item']['item_id'])
if trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 1:
log.info('@@@INVENTORY@@@ recycle success')
trash_status = 1
elif trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 2:
log.info('@@@INVENTORY@@@ not enough items to trash - parsing messed up')
trash_status = 1
elif trash_result['responses']['RECYCLE_INVENTORY_ITEM']['result'] == 3:
trash_status = 1
log.info('@@@INVENTORY@@@ tried to recycle incubator - parsing messed up again')
else:
log.warning('trashing failed - giving up on this attempt')
trash_status = 1
if egg is not None and args.incubate_eggs and int(level) < int(args.level_cap) and (breakableId is not None or unbreakableId is not None):
if breakableId is None:
bater = unbreakableId
else:
bater = breakableId
egg_status = None
while egg_status is None:
req = api.create_request()
egg_request = req.use_item_egg_incubator(item_id=bater,
pokemon_id=egg)
time.sleep(4.20)
egg_request = req.call()
egg_status = egg_request['responses']['USE_ITEM_EGG_INCUBATOR']['result']
if egg_status == 0:
log.warning('###EGG### Server responded with "unset" - unexpected result')
elif egg_status == 1:
log.info('###EGG### Egg incubation success - egg set')
breakableId = None
unbreakableId = None
break
elif egg_status == 2:
log.warning('###EGG### Incubator not found! Parsing issues with above!')
elif egg_status == 3:
log.warning('###EGG### Egg not found! Parsing issues with above! Egg: %s', egg)
elif egg_status == 4:
log.warning('###EGG### Given ID does not point to an egg! Parsing issues!')
elif egg_status == 5:
log.warning('###EGG### Incubator in use! Still parsing issues!')
elif egg_status == 6:
log.warning('###EGG### Egg already incubating! More parsing issues!')
elif egg_status == 7:
log.warning('###EGG### This incubator is broken! Possibly stale inventory data? Parsing issues!')
if monCount > 69:
release_status = None
while release_status is None:
req = api.create_request()
release_request = req.release_pokemon(pokemon_id=monID)
time.sleep(4.20)
release_request = req.call()
release_status = release_request['responses']['RELEASE_POKEMON']['result']
if release_status == 1:
log.info('###EGG### Excess pokemon removed')
break
else:
log.warning('###EGG### Excess pokemon removal failed - trying again in 10 sec')
if 'active_fort_modifier' in f:
lure_expiration = (datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0) +
timedelta(minutes=30))
active_fort_modifier = f['active_fort_modifier']
if args.webhooks and args.webhook_updates_only:
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified_time': f[
'last_modified_timestamp_ms'],
'lure_expiration': calendar.timegm(
lure_expiration.timetuple()),
'active_fort_modifier': active_fort_modifier
}))
else:
distance = 0.04
if in_radius((f['latitude'], f['longitude']), step_location, distance):
if args.setLure is True:
if args.lureFence is not None:
allowed = geofence(step_location, args.lureFence)
log.warning('FENCE: %s', allowed)
if allowed == []:
log.warning('STOP IS FORBIDDEN')
forbidden = True
else:
log.warning('STOP IS GOOD')
forbidden = False
if args.nolureFence is not None:
forbidden = geofence(step_location, args.nolureFence, forbidden=True)
log.warning('DISALLOW FENCE: %s', forbidden)
if forbidden == []:
log.warning('STOP IS GOOD')
forbidden = False
else:
forbidden = True
log.warning('STOP IS FORBIDDEN')
lure_status = None
lure_id = 501
if totalDisks == 0:
log.warning('Tried to set a lure, but this account has none.')
forbidden = True
log.warning('This account has no lures! Raising an exception to abort!')
no_lures = spin_response['NoLures!']  # deliberately raises KeyError so the spin is treated as failed
while lure_status is None and forbidden is False:
req = api.create_request()
lure_request = req.add_fort_modifier(modifier_type=lure_id,
fort_id=f['id'],
player_latitude=step_location[0],
player_longitude=step_location[1])
time.sleep(4.20)
lure_request = req.call()
log.warning('@@@LURE RESPONSE@@@ %s', lure_request['responses'])
lure_status = lure_request['responses']['ADD_FORT_MODIFIER']['result']
if lure_status == 0:
log.warning('███Lure result was unset - unexpected!███')
lure_status = 'Failed'
elif lure_status == 1:
log.warning('███Lure successfully set!███')
lure_status = 'Win'
elif lure_status == 2:
log.warning('███Stop already has a lure!███')
lure_status = 'Panic'
elif lure_status == 3:
log.warning('███Out of range to set lure! (how?)███')
lure_status = 'Range'
elif lure_status == 4:
log.warning('███Account has no lures!███')
lure_status = 'empty'
lure_expiration, active_fort_modifier = None, None
# Send all pokestops to webhooks.
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change
# the information pushed to webhooks. Similar to above and
# previous commits.
l_e = None
if lure_expiration is not None:
l_e = calendar.timegm(lure_expiration.timetuple())
wh_update_queue.put(('pokestop', {
'pokestop_id': b64encode(str(f['id'])),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified_time': f['last_modified_timestamp_ms'],
'lure_expiration': l_e,
'active_fort_modifier': active_fort_modifier
}))
if ((f['id'], int(f['last_modified_timestamp_ms'] / 1000.0))
in encountered_pokestops):
# If pokestop has been encountered before and hasn't
# changed don't process it.
stopsskipped += 1
continue
pokestops[f['id']] = {
'pokestop_id': f['id'],
'enabled': f.get('enabled', 0),
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
'lure_expiration': lure_expiration,
'active_fort_modifier': active_fort_modifier
}
# Currently, there are only stops and gyms.
elif config['parse_gyms'] and f.get('type') is None:
# Send gyms to webhooks.
if args.webhooks and not args.webhook_updates_only:
# Explicitly set 'webhook_data', in case we want to change
# the information pushed to webhooks. Similar to above
# and previous commits.
wh_update_queue.put(('gym', {
'gym_id': b64encode(str(f['id'])),
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': f['last_modified_timestamp_ms']
}))
gyms[f['id']] = {
'gym_id': f['id'],
'team_id': f.get('owned_by_team', 0),
'guard_pokemon_id': f.get('guard_pokemon_id', 0),
'gym_points': f.get('gym_points', 0),
'enabled': f['enabled'],
'latitude': f['latitude'],
'longitude': f['longitude'],
'last_modified': datetime.utcfromtimestamp(
f['last_modified_timestamp_ms'] / 1000.0),
}
# Helping out the GC.
del forts
log.info('Parsing found Pokemon: %d, nearby: %d, pokestops: %d, gyms: %d.',
len(pokemon) + skipped,
nearby_pokemon,
len(pokestops) + stopsskipped,
len(gyms))
log.debug('Skipped Pokemon: %d, pokestops: %d.', skipped, stopsskipped)
# Look for spawnpoints within scan_loc that are not here to see if we
# can narrow down tth window.
for sp in ScannedLocation.linked_spawn_points(scan_loc['cellid']):
if sp['id'] in sp_id_list:
# Don't overwrite changes from this parse with DB version.
sp = spawn_points[sp['id']]
else:
# If the cell has completed, we need to classify all
# the SPs that were not picked up in the scan
if just_completed:
SpawnpointDetectionData.classify(sp, scan_loc, now_secs)
spawn_points[sp['id']] = sp
if SpawnpointDetectionData.unseen(sp, now_secs):
spawn_points[sp['id']] = sp
endpoints = SpawnPoint.start_end(sp, args.spawn_delay)
if clock_between(endpoints[0], now_secs, endpoints[1]):
sp['missed_count'] += 1
spawn_points[sp['id']] = sp
log.warning('%s kind spawnpoint %s has no Pokemon %d times'
' in a row.',
sp['kind'], sp['id'], sp['missed_count'])
log.info('Possible causes: Still doing initial scan, super'
' rare double spawnpoint during')
log.info('hidden period, or Niantic has removed '
'spawnpoint.')
if (not SpawnPoint.tth_found(sp) and scan_loc['done'] and
(now_secs - sp['latest_seen'] -
args.spawn_delay) % 3600 < 60):
log.warning('Spawnpoint %s was unable to locate a TTH, with '
'only %ss after Pokemon last seen.', sp['id'],
(now_secs - sp['latest_seen']) % 3600)
log.info('Restarting current 15 minute search for TTH.')
if sp['id'] not in sp_id_list:
SpawnpointDetectionData.classify(sp, scan_loc, now_secs)
sp['latest_seen'] = (sp['latest_seen'] - 60) % 3600
sp['earliest_unseen'] = (
sp['earliest_unseen'] + 14 * 60) % 3600
spawn_points[sp['id']] = sp
db_update_queue.put((ScannedLocation, {0: scan_loc}))
if pokemon:
db_update_queue.put((Pokemon, pokemon))
if pokestops:
db_update_queue.put((Pokestop, pokestops))
if gyms:
db_update_queue.put((Gym, gyms))
if spawn_points:
db_update_queue.put((SpawnPoint, spawn_points))
db_update_queue.put((ScanSpawnPoint, scan_spawn_points))
if sightings:
db_update_queue.put((SpawnpointDetectionData, sightings))
if not nearby_pokemon and not wild_pokemon:
# After parsing the forts, we'll mark this scan as bad due to
# a possible speed violation.
return {
'count': wild_pokemon_count + forts_count,
'gyms': gyms,
'sp_id_list': sp_id_list,
'bad_scan': True,
'scan_secs': now_secs
}
return {
'count': wild_pokemon_count + forts_count,
'gyms': gyms,
'sp_id_list': sp_id_list,
'bad_scan': False,
'scan_secs': now_secs
}
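# --- Illustrative caller sketch (assumption, not part of the original code) ---
# The parser above reports failure modes through the returned dict instead of
# raising. A hypothetical caller could branch on it like this; the names
# parse_map, requeue_scan and record_success are placeholders (this excerpt
# starts mid-function, so the real entry point is not shown here):
#
#     result = parse_map(args, map_dict, step_location, ...)
#     if result.get('captcha'):
#         requeue_scan(step_location, needs_captcha=True)
#     elif result.get('bad_scan'):
#         requeue_scan(step_location)
#     else:
#         record_success(result['count'], result['scan_secs'])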
def parse_gyms(args, gym_responses, wh_update_queue, db_update_queue):
gym_details = {}
gym_members = {}
gym_pokemon = {}
trainers = {}
i = 0
for g in gym_responses.values():
gym_state = g['gym_state']
gym_id = gym_state['fort_data']['id']
gym_details[gym_id] = {
'gym_id': gym_id,
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
}
if args.webhooks:
webhook_data = {
'id': b64encode(str(gym_id)),
'latitude': gym_state['fort_data']['latitude'],
'longitude': gym_state['fort_data']['longitude'],
'team': gym_state['fort_data'].get('owned_by_team', 0),
'name': g['name'],
'description': g.get('description'),
'url': g['urls'][0],
'pokemon': [],
}
for member in gym_state.get('memberships', []):
gym_members[i] = {
'gym_id': gym_id,
'pokemon_uid': member['pokemon_data']['id'],
}
gym_pokemon[i] = {
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'trainer_name': member['trainer_public_profile']['name'],
'num_upgrades': member['pokemon_data'].get('num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get('cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get(
'additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get(
'individual_defense', 0),
'iv_stamina': member['pokemon_data'].get(
'individual_stamina', 0),
'iv_attack': member['pokemon_data'].get(
'individual_attack', 0),
'last_seen': datetime.utcnow(),
}
trainers[i] = {
'name': member['trainer_public_profile']['name'],
'team': gym_state['fort_data']['owned_by_team'],
'level': member['trainer_public_profile']['level'],
'last_seen': datetime.utcnow(),
}
if args.webhooks:
webhook_data['pokemon'].append({
'pokemon_uid': member['pokemon_data']['id'],
'pokemon_id': member['pokemon_data']['pokemon_id'],
'cp': member['pokemon_data']['cp'],
'num_upgrades': member['pokemon_data'].get(
'num_upgrades', 0),
'move_1': member['pokemon_data'].get('move_1'),
'move_2': member['pokemon_data'].get('move_2'),
'height': member['pokemon_data'].get('height_m'),
'weight': member['pokemon_data'].get('weight_kg'),
'stamina': member['pokemon_data'].get('stamina'),
'stamina_max': member['pokemon_data'].get('stamina_max'),
'cp_multiplier': member['pokemon_data'].get(
'cp_multiplier'),
'additional_cp_multiplier': member['pokemon_data'].get(
'additional_cp_multiplier', 0),
'iv_defense': member['pokemon_data'].get(
'individual_defense', 0),
'iv_stamina': member['pokemon_data'].get(
'individual_stamina', 0),
'iv_attack': member['pokemon_data'].get(
'individual_attack', 0),
'trainer_name': member['trainer_public_profile']['name'],
'trainer_level': member['trainer_public_profile']['level'],
})
i += 1
if args.webhooks:
wh_update_queue.put(('gym_details', webhook_data))
# All this database stuff is synchronous (not using the upsert queue) on
# purpose. Since the search workers load the GymDetails model from the
# database to determine if a gym needs to be rescanned, we need to be sure
# the GymDetails get fully committed to the database before moving on.
#
# We _could_ synchronously upsert GymDetails, then queue the other tables
# for upsert, but that would put that Gym's overall information in a weird
# non-atomic state.
# Upsert all the models.
if gym_details:
db_update_queue.put((GymDetails, gym_details))
if gym_pokemon:
db_update_queue.put((GymPokemon, gym_pokemon))
if trainers:
db_update_queue.put((Trainer, trainers))
# This needs to be completed in a transaction, because we don't want any
# other thread or process to mess with the GymMembers for the gyms we're
# updating while we're updating the bridge table.
with flaskDb.database.transaction():
# Get rid of all the gym members, we're going to insert new records.
if gym_details:
DeleteQuery(GymMember).where(
GymMember.gym_id << gym_details.keys()).execute()
# Insert new gym members.
if gym_members:
db_update_queue.put((GymMember, gym_members))
log.info('Upserted gyms: %d, gym members: %d.',
len(gym_details),
len(gym_members))
def db_updater(args, q, db):
# The forever loop.
while True:
try:
while True:
try:
flaskDb.connect_db()
break
except Exception as e:
log.warning('%s... Retrying...', repr(e))
time.sleep(5)
# Loop the queue.
while True:
last_upsert = default_timer()
model, data = q.get()
bulk_upsert(model, data, db)
q.task_done()
log.debug('Upserted to %s, %d records (upsert queue '
'remaining: %d) in %.2f seconds.',
model.__name__,
len(data),
q.qsize(),
default_timer() - last_upsert)
# Helping out the GC.
del model
del data
if q.qsize() > 50:
log.warning(
"DB queue is > 50 (@%d); try increasing --db-threads.",
q.qsize())
except Exception as e:
log.exception('Exception in db_updater: %s', repr(e))
time.sleep(5)
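# --- Illustrative wiring sketch (assumption, not shown in this excerpt) ---
# db_updater is meant to drain a shared queue that the parsers above fill
# with (model, data) tuples. A minimal way to run it on a worker thread:
#
#     from Queue import Queue            # Python 2, matching this module
#     from threading import Thread
#     db_update_queue = Queue()
#     t = Thread(target=db_updater, args=(args, db_update_queue, db))
#     t.daemon = True
#     t.start()
#     db_update_queue.put((Pokemon, pokemon))   # as done in the parse code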
def clean_db_loop(args):
while True:
try:
query = (MainWorker
.delete()
.where((MainWorker.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
query = (WorkerStatus
.delete()
.where((WorkerStatus.last_modified <
(datetime.utcnow() - timedelta(minutes=30)))))
query.execute()
# Remove active modifier from expired lured pokestops.
query = (Pokestop
.update(lure_expiration=None, active_fort_modifier=None)
.where(Pokestop.lure_expiration < datetime.utcnow()))
query.execute()
# Remove old (unusable) captcha tokens
query = (Token
.delete()
.where((Token.last_updated <
(datetime.utcnow() - timedelta(minutes=2)))))
query.execute()
# If desired, clear old Pokemon spawns.
if args.purge_data > 0:
log.info('Beginning purge of old Pokemon spawns.')
start = datetime.utcnow()
query = (Pokemon
.delete()
.where((Pokemon.disappear_time <
(datetime.utcnow() -
timedelta(hours=args.purge_data)))))
rows = query.execute()
end = datetime.utcnow()
diff = end - start
log.info("Completed purge of old Pokemon spawns. "
"%i deleted in %f seconds.",
rows, diff.total_seconds())
log.info('Regular database cleaning complete.')
time.sleep(60)
except Exception as e:
log.exception('Exception in clean_db_loop: %s', repr(e))
def bulk_upsert(cls, data, db):
num_rows = len(data.values())
i = 0
if args.db_type == 'mysql':
step = 250
else:
# SQLite has a default max number of parameters of 999,
# so we need to limit how many rows we insert for it.
step = 50
with db.atomic():
while i < num_rows:
log.debug('Inserting items %d to %d.', i, min(i + step, num_rows))
try:
# Turn off FOREIGN_KEY_CHECKS on MySQL, because apparently it's
# unable to recognize strings to update unicode keys for
# foreign key fields, thus giving lots of foreign key
# constraint errors.
if args.db_type == 'mysql':
db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')
# Use peewee's own implementation of the insert_many() method.
InsertQuery(cls, rows=data.values()[
i:min(i + step, num_rows)]).upsert().execute()
if args.db_type == 'mysql':
db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')
except Exception as e:
# If there is a DB table constraint error, dump the data and
# don't retry.
#
# Unrecoverable error strings:
unrecoverable = ['constraint', 'has no attribute',
'peewee.IntegerField object at']
has_unrecoverable = any(
x in str(e) for x in unrecoverable)
if has_unrecoverable:
log.warning('%s. Data is:', repr(e))
log.warning(data.items())
else:
log.warning('%s... Retrying...', repr(e))
time.sleep(1)
continue
i += step
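# --- Usage note (assumption, for illustration only) ---
# bulk_upsert expects `data` to be a dict of row dicts keyed by an arbitrary
# index, matching how the parsers above build their payloads, e.g.:
#
#     bulk_upsert(Pokestop, {0: {'pokestop_id': 'abc', 'enabled': 1,
#                                'latitude': 0.0, 'longitude': 0.0,
#                                'last_modified': datetime.utcnow(),
#                                'lure_expiration': None,
#                                'active_fort_modifier': None}}, db)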
def create_tables(db):
db.connect()
verify_database_schema(db)
tables = [Pokemon, Pokestop, Gym, ScannedLocation, GymDetails,
GymMember, GymPokemon, Trainer, MainWorker, WorkerStatus,
SpawnPoint, ScanSpawnPoint, SpawnpointDetectionData,
Token, LocationAltitude]
for table in tables:
if not table.table_exists():
log.info('Creating table: %s', table.__name__)
db.create_tables([table], safe=True)
else:
log.debug('Skipping table %s, it already exists.', table.__name__)
db.close()
def drop_tables(db):
tables = [Pokemon, Pokestop, Gym, ScannedLocation, Versions,
GymDetails, GymMember, GymPokemon, Trainer, MainWorker,
WorkerStatus, SpawnPoint, ScanSpawnPoint,
SpawnpointDetectionData, LocationAltitude,
Token]
db.connect()
db.execute_sql('SET FOREIGN_KEY_CHECKS=0;')
for table in tables:
if table.table_exists():
log.info('Dropping table: %s', table.__name__)
db.drop_tables([table], safe=True)
db.execute_sql('SET FOREIGN_KEY_CHECKS=1;')
db.close()
def verify_database_schema(db):
if not Versions.table_exists():
db.create_tables([Versions])
if ScannedLocation.table_exists():
# Versions table doesn't exist, but there are tables. This must
# mean the user is coming from a database that existed before we
# started tracking the schema version. Perform a full upgrade.
InsertQuery(Versions, {Versions.key: 'schema_version',
Versions.val: 0}).execute()
database_migrate(db, 0)
else:
InsertQuery(Versions, {Versions.key: 'schema_version',
Versions.val: db_schema_version}).execute()
else:
db_ver = Versions.get(Versions.key == 'schema_version').val
if db_ver < db_schema_version:
database_migrate(db, db_ver)
elif db_ver > db_schema_version:
log.error('Your database version (%i) appears to be newer than '
'the code supports (%i).', db_ver, db_schema_version)
log.error('Please upgrade your code base or drop all tables in '
'your database.')
sys.exit(1)
def database_migrate(db, old_ver):
# Update database schema version.
Versions.update(val=db_schema_version).where(
Versions.key == 'schema_version').execute()
log.info('Detected database version %i, updating to %i...',
old_ver, db_schema_version)
# Perform migrations here.
migrator = None
if args.db_type == 'mysql':
migrator = MySQLMigrator(db)
else:
migrator = SqliteMigrator(db)
if old_ver < 2:
migrate(migrator.add_column('pokestop', 'encounter_id',
CharField(max_length=50, null=True)))
if old_ver < 3:
migrate(
migrator.add_column('pokestop', 'active_fort_modifier',
CharField(max_length=50, null=True)),
migrator.drop_column('pokestop', 'encounter_id'),
migrator.drop_column('pokestop', 'active_pokemon_id')
)
if old_ver < 4:
db.drop_tables([ScannedLocation])
if old_ver < 5:
# Some Pokemon were added before the 595 bug was "fixed".
# Clean those up for a better UX.
query = (Pokemon
.delete()
.where(Pokemon.disappear_time >
(datetime.utcnow() - timedelta(hours=24))))
query.execute()
if old_ver < 6:
migrate(
migrator.add_column('gym', 'last_scanned',
DateTimeField(null=True)),
)
if old_ver < 7:
migrate(
migrator.drop_column('gymdetails', 'description'),
migrator.add_column('gymdetails', 'description',
TextField(null=True, default=""))
)
if old_ver < 8:
migrate(
migrator.add_column('pokemon', 'individual_attack',
IntegerField(null=True, default=0)),
migrator.add_column('pokemon', 'individual_defense',
IntegerField(null=True, default=0)),
migrator.add_column('pokemon', 'individual_stamina',
IntegerField(null=True, default=0)),
migrator.add_column('pokemon', 'move_1',
IntegerField(null=True, default=0)),
migrator.add_column('pokemon', 'move_2',
IntegerField(null=True, default=0))
)
if old_ver < 9:
migrate(
migrator.add_column('pokemon', 'last_modified',
DateTimeField(null=True, index=True)),
migrator.add_column('pokestop', 'last_updated',
DateTimeField(null=True, index=True))
)
if old_ver < 10:
# Information in ScannedLocation and Member Status is probably
# out of date. Drop and recreate with new schema.
db.drop_tables([ScannedLocation])
db.drop_tables([WorkerStatus])
if old_ver < 11:
db.drop_tables([ScanSpawnPoint])
if old_ver < 13:
db.drop_tables([WorkerStatus])
db.drop_tables([MainWorker])
if old_ver < 14:
migrate(
migrator.add_column('pokemon', 'weight',
DoubleField(null=True, default=0)),
migrator.add_column('pokemon', 'height',
DoubleField(null=True, default=0)),
migrator.add_column('pokemon', 'gender',
IntegerField(null=True, default=0))
)
if old_ver < 15:
# we don't have to touch sqlite because it has REAL and INTEGER only
if args.db_type == 'mysql':
db.execute_sql('ALTER TABLE `pokemon` '
'MODIFY COLUMN `weight` FLOAT NULL DEFAULT NULL,'
'MODIFY COLUMN `height` FLOAT NULL DEFAULT NULL,'
'MODIFY COLUMN `gender` SMALLINT NULL DEFAULT NULL'
';')
if old_ver < 16:
log.info('This DB schema update can take some time. '
'Please be patient.')
# change some column types from INT to SMALLINT
# we don't have to touch sqlite because it has INTEGER only
if args.db_type == 'mysql':
db.execute_sql(
'ALTER TABLE `pokemon` '
'MODIFY COLUMN `pokemon_id` SMALLINT NOT NULL,'
'MODIFY COLUMN `individual_attack` SMALLINT '
'NULL DEFAULT NULL,'
'MODIFY COLUMN `individual_defense` SMALLINT '
'NULL DEFAULT NULL,'
'MODIFY COLUMN `individual_stamina` SMALLINT '
'NULL DEFAULT NULL,'
'MODIFY COLUMN `move_1` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `move_2` SMALLINT NULL DEFAULT NULL;'
)
db.execute_sql(
'ALTER TABLE `gym` '
'MODIFY COLUMN `team_id` SMALLINT NOT NULL,'
'MODIFY COLUMN `guard_pokemon_id` SMALLINT NOT NULL;'
)
db.execute_sql(
'ALTER TABLE `scannedlocation` '
'MODIFY COLUMN `band1` SMALLINT NOT NULL,'
'MODIFY COLUMN `band2` SMALLINT NOT NULL,'
'MODIFY COLUMN `band3` SMALLINT NOT NULL,'
'MODIFY COLUMN `band4` SMALLINT NOT NULL,'
'MODIFY COLUMN `band5` SMALLINT NOT NULL,'
'MODIFY COLUMN `midpoint` SMALLINT NOT NULL,'
'MODIFY COLUMN `width` SMALLINT NOT NULL;'
)
db.execute_sql(
'ALTER TABLE `spawnpoint` '
'MODIFY COLUMN `latest_seen` SMALLINT NOT NULL,'
'MODIFY COLUMN `earliest_unseen` SMALLINT NOT NULL;'
)
db.execute_sql(
'ALTER TABLE `spawnpointdetectiondata` '
'MODIFY COLUMN `tth_secs` SMALLINT NULL DEFAULT NULL;'
)
db.execute_sql(
'ALTER TABLE `versions` '
'MODIFY COLUMN `val` SMALLINT NOT NULL;'
)
db.execute_sql(
'ALTER TABLE `gympokemon` '
'MODIFY COLUMN `pokemon_id` SMALLINT NOT NULL,'
'MODIFY COLUMN `cp` SMALLINT NOT NULL,'
'MODIFY COLUMN `num_upgrades` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `move_1` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `move_2` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `stamina` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `stamina_max` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `iv_defense` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `iv_stamina` SMALLINT NULL DEFAULT NULL,'
'MODIFY COLUMN `iv_attack` SMALLINT NULL DEFAULT NULL;'
)
db.execute_sql(
'ALTER TABLE `trainer` '
'MODIFY COLUMN `team` SMALLINT NOT NULL,'
'MODIFY COLUMN `level` SMALLINT NOT NULL;'
)
# add some missing indexes
migrate(
migrator.add_index('gym', ('last_scanned',), False),
migrator.add_index('gymmember', ('last_scanned',), False),
migrator.add_index('gymmember', ('pokemon_uid',), False),
migrator.add_index('gympokemon', ('trainer_name',), False),
migrator.add_index('pokestop', ('active_fort_modifier',), False),
migrator.add_index('spawnpointdetectiondata', ('spawnpoint_id',),
False),
migrator.add_index('token', ('last_updated',), False)
)
# pokestop.last_updated was missing in a previous migration
# check whether we have to add it
has_last_updated_index = False
for index in db.get_indexes('pokestop'):
if index.columns[0] == 'last_updated':
has_last_updated_index = True
break
if not has_last_updated_index:
log.debug('pokestop.last_updated index is missing. Creating now.')
migrate(
migrator.add_index('pokestop', ('last_updated',), False)
)
log.info('Schema upgrade complete.')
if old_ver < 17:
migrate(
migrator.add_column('pokemon', 'form',
SmallIntegerField(null=True))
)
if old_ver < 18:
migrate(
migrator.add_column('pokemon', 'cp',
SmallIntegerField(null=True, default=0)),
migrator.add_column('pokemon', 'cp_multiplier',
FloatField(null=True))
)
|
agpl-3.0
|
MarcoAlmada/bandit-panda
|
bandit/algorithms/UCB1RPV.py
|
1
|
2430
|
from math import log
from pandas import DataFrame
from BaseBanditAlgorithm import BaseBanditAlgorithm
class UCB1RPV(BaseBanditAlgorithm):
"""
A UCB1-Normal variant that accounts for a two-step purchasing process.
The arm selection criteria are the same as those used for UCB1-Normal, with
one additional condition: each arm must yield at least one non-zero reward
before the algorithm enters the UCB maximization phase.
"""
def __init__(self, counts=[], values=[], nonzero=[], sumsquares=[]):
"""
Algorithm requires no control parameters.
Inputs:
counts: List[int] -- Initial iteration counts for each arm
values: List[float] -- Initial average reward for each arm
nonzero: List[int] -- Initial number of non-zero rewards for each arm
sumsquares: List[float] -- Initial sum of squared rewards for each arm
"""
self.arms = DataFrame({
'Iteration':counts,
'Reward':values,
'Nonzero':nonzero,
'Sum-of-squares':sumsquares
})
self.arms.index.name = 'Arm'
return
def initialize(self, n_arms):
self.arms = DataFrame(
{
'Iteration':[0],
'Reward':[0.0],
'Nonzero': [0],
'Sum-of-squares':[0.0]
},
range(n_arms)
)
self.arms.index.name = 'Arm'
return
def select_arm(self):
total_count = self.arms['Iteration'].sum()
total_conversions = self.arms['Nonzero'].sum()
arm = self.arms['Iteration'].idxmin()
if self.arms.ix[arm, 'Iteration'] <= 8 * log(total_count + 1):
return arm
elif self.arms.ix[arm, 'Nonzero'] == 0:
return arm
sq_diff = self.arms['Sum-of-squares'] - self.arms['Iteration'] * self.arms['Reward'] ** 2
sq_diff = sq_diff/(self.arms['Iteration'] - 1)
ucb_values = 16 * log(total_count) / self.arms['Iteration']
ucb_values *= sq_diff
ucb_values **= 0.5
ucb_values += self.arms['Reward']
return ucb_values.idxmax()
def update(self, chosen_arm, reward):
arm = int(chosen_arm)
n = self.arms.ix[arm, 'Iteration'] + 1
if reward != 0:
self.arms.ix[arm, 'Nonzero'] += 1
self.arms.ix[arm, 'Iteration'] = n
self.arms.ix[arm, 'Reward'] *= (n-1)/float(n)
self.arms.ix[arm, 'Reward'] += reward/float(n)
self.arms.ix[arm, 'Sum-of-squares'] += reward**2
return
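# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical driver loop; the per-arm conversion rates and order
# values below are invented purely to illustrate the two-step reward process
# (most rewards are zero, occasional rewards equal the order value):
#
#     import random
#     algo = UCB1RPV()
#     algo.initialize(n_arms=3)
#     conversion = [0.05, 0.08, 0.03]
#     order_value = [20.0, 15.0, 40.0]
#     for _ in range(1000):
#         arm = algo.select_arm()
#         reward = order_value[arm] if random.random() < conversion[arm] else 0.0
#         algo.update(arm, reward)
#     print(algo.arms)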
|
mit
|
tjhei/burnman_old2
|
murakami_book_chapter.py
|
2
|
7539
|
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
Attempt to reproduce Figure 6.12 From chapter 6 of Physics and Chemistry of the Deep Earth, 2013
Book chapter by Motohiko Murakami, editor Shun-ichiro Karato
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
import matplotlib.image as mpimg
import burnman.minerals_base as mb
import numpy as np
method = 'slb2'
#define the minerals from table 6.3
mg_perovskite = burnman.material()
mg_perovskite.params = { 'name': 'Mg perovskite',
'molar_mass' : 0.1004,
'V_0': 24.43e-6,
'K_0': 253.0e9,
'Kprime_0': 3.9,
'G_0' : 172.9e9,
'Gprime_0' : 1.56,
'n': 5.0,
'Debye_0': 1100.,
'grueneisen_0': 1.40,
'q_0': 1.40,
'eta_s_0' : 2.6}
mg_perovskite.set_method('slb2')
fe_perovskite = burnman.material()
fe_perovskite.params = { 'name': 'Fe perovskite',
'molar_mass' : 0.1319,
'V_0': 25.49e-6,
'K_0': 281.0e9,
'Kprime_0': 4.1,
'G_0' : 138.0e9,
'Gprime_0' : 1.70,
'n': 5.0,
'Debye_0': 841.,
'grueneisen_0': 1.48,
'q_0': 1.40,
'eta_s_0' : 2.1}
fe_perovskite.set_method(method)
periclase = burnman.material()
periclase.params = { 'name': 'periclase',
'molar_mass' : 0.0403,
'V_0': 11.24e-6,
'K_0': 161.0e9,
'Kprime_0': 3.9,
'G_0' : 130.9e9,
'Gprime_0' : 1.92,
'n': 2.0,
'Debye_0': 773.,
'grueneisen_0': 1.50,
'q_0': 1.50,
'eta_s_0' : 2.3}
periclase.set_method(method)
wustite = burnman.material()
wustite.params = { 'name': 'wustite',
'molar_mass' : 0.07184,
'V_0': 12.06e-6,
'K_0': 152.0e9,
'Kprime_0': 4.9,
'G_0' : 47.0e9,
'Gprime_0' : 0.70,
'n': 2.0,
'Debye_0': 455.,
'grueneisen_0': 1.28,
'q_0': 1.50,
'eta_s_0' : 0.8}
wustite.set_method(method)
#in the text for the book chapter a linear relationship in elastic properties
#for the solid solutions is assumed...
class ferropericlase(mb.helper_solid_solution):
def __init__(self, fe_num):
base_materials = [periclase, wustite]
molar_fraction = [1. - fe_num, 0.0 + fe_num]
mb.helper_solid_solution.__init__(self, base_materials, molar_fraction)
class perovskite(mb.helper_solid_solution):
def __init__(self, fe_num):
base_materials = [mg_perovskite, fe_perovskite]
molar_fraction = [1. - fe_num, 0.0 + fe_num]
mb.helper_solid_solution.__init__(self, base_materials, molar_fraction)
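# --- Illustrative sketch (assumption): what the linear solid-solution mixing
# referred to above amounts to for a single parameter. This is not claimed to
# be burnman's implementation, just the molar-fraction-weighted average of the
# endmember values defined earlier:
#
#     def linear_mix(param, fe_num, mg_params, fe_params):
#         return (1. - fe_num) * mg_params[param] + fe_num * fe_params[param]
#
#     # e.g. V_0 of (Mg0.94,Fe0.06)SiO3 perovskite under the linear assumption:
#     linear_mix('V_0', 0.06, mg_perovskite.params, fe_perovskite.params)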
#define the P-T path
pressure = np.linspace(28.0e9, 129e9, 25)
temperature_bs = burnman.geotherm.brown_shankland(pressure)
temperature_an = burnman.geotherm.anderson(pressure)
#seismic model for comparison:
seismic_model = burnman.seismic.prem() # pick from .prem() .slow() .fast() (see burnman/seismic.py)
depths = map(seismic_model.depth, pressure)
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
#pure perovskite
perovskitite = burnman.composite( ( (perovskite(0.06), 1.0),) )
perovskitite.set_method(method)
#pure periclase
periclasite = burnman.composite( ( (ferropericlase(0.21), 1.0),))
periclasite.set_method(method)
#pyrolite (80% perovskite)
pyrolite = burnman.composite( ( (perovskite(0.06), 0.80),
(ferropericlase(0.21), 0.20) ) )
pyrolite.set_method(method)
#preferred mixture?
amount_perovskite = 0.92
preferred_mixture = burnman.composite( ( (perovskite(0.06), amount_perovskite),
(ferropericlase(0.21), 1.0-amount_perovskite) ) )
preferred_mixture.set_method(method)
mat_rho_1, mat_vp_1, mat_vs_1, mat_vphi_1, mat_K_1, mat_G_1 = burnman.velocities_from_rock(perovskitite,seis_p, temperature_bs)
mat_rho_2, mat_vp_2, mat_vs_2, mat_vphi_2, mat_K_2, mat_G_2 = burnman.velocities_from_rock(periclasite,seis_p, temperature_bs)
mat_rho_3, mat_vp_3, mat_vs_3, mat_vphi_3, mat_K_3, mat_G_3 = burnman.velocities_from_rock(pyrolite,seis_p, temperature_bs)
mat_rho_4, mat_vp_4, mat_vs_4, mat_vphi_4, mat_K_4, mat_G_4 = burnman.velocities_from_rock(preferred_mixture,seis_p, temperature_bs)
### HERE IS THE STEP WITH THE INCORRECT MIXING ###
# comment this out to have correct phase averaging, leave it in to have incorrect phase averaging
#mat_vs_3 = 0.5*((0.80*mat_vs_1 + 0.20*mat_vs_2) + np.ones_like(mat_vs_1)/(0.80/mat_vs_1 + 0.20/mat_vs_2))
#mat_vs_4 = 0.5*((0.92*mat_vs_1 + 0.08*mat_vs_2) + np.ones_like(mat_vs_1)/(0.92/mat_vs_1 + 0.08/mat_vs_2))
plt.subplot(1,1,1)
plt.ylim(5,7.6)
plt.xlim(25,135)
fig1 = mpimg.imread('input_figures/murakami_book_chapter.png')
plt.imshow(fig1, extent=[25,135,5.0,7.6], aspect='auto')
plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color='b',linestyle='--',marker='o',markerfacecolor='b',markersize=3)
plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color='k',linestyle='--',marker='o',markerfacecolor='k',markersize=3)
plt.plot(seis_p/1.e9,mat_vs_3/1.e3,color='g',linestyle='--',marker='o',markerfacecolor='g',markersize=3)
plt.plot(seis_p/1.e9,mat_vs_4/1.e3,color='r',linestyle='--',marker='o',markerfacecolor='r',markersize=3)
plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='',marker='o',markerfacecolor='w',markersize=4)
plt.title("Vs (km/s), VRH on moduli")
plt.savefig("reproduce_murakami_moduli_a.pdf")
plt.show()
mat_rho_1, mat_vp_1, mat_vs_1, mat_vphi_1, mat_K_1, mat_G_1 = burnman.velocities_from_rock(perovskitite,seis_p, temperature_an)
mat_rho_2, mat_vp_2, mat_vs_2, mat_vphi_2, mat_K_2, mat_G_2 = burnman.velocities_from_rock(periclasite,seis_p, temperature_an)
mat_rho_3, mat_vp_3, mat_vs_3, mat_vphi_3, mat_K_3, mat_G_3 = burnman.velocities_from_rock(pyrolite,seis_p, temperature_an)
# again, leave this line in to have incorrect phase averaging...
#mat_vs_3 = 0.5*((0.80*mat_vs_1 + 0.20*mat_vs_2) + np.ones_like(mat_vs_1)/(0.80/mat_vs_1 + 0.20/mat_vs_2))
plt.subplot(1,1,1)
plt.ylim(5,7.6)
plt.xlim(25,135)
fig2 = mpimg.imread('input_figures/murakami_book_chapter_b.png')
plt.imshow(fig2, extent=[25,135,5.0,7.6], aspect='auto')
plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='',marker='o',markerfacecolor='w',markersize=4)
plt.plot(seis_p/1.e9,mat_vs_1/1.e3,color='b',linestyle='--',marker='o',markerfacecolor='b',markersize=3)
plt.plot(seis_p/1.e9,mat_vs_2/1.e3,color='k',linestyle='--',marker='o',markerfacecolor='k',markersize=3)
plt.plot(seis_p/1.e9,mat_vs_3/1.e3,color='g',linestyle='--',marker='o',markerfacecolor='g',markersize=3)
plt.title("Vs (km/s), VRH on moduli")
plt.savefig("reproduce_murakami_moduli_b.pdf")
plt.show()
|
gpl-2.0
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/RealData/Data.py
|
1
|
6943
|
import sys
from multiprocessing import Pool
import numpy as np
import pandas as pd
import Utils.Util as utl
# The allele frequencies are in the format A:T:C:G:N:del, i.e: count of bases 'A', count of bases 'T',... and deletion count in the end (character '*' in the mpileup)
# ['A', 'T', 'C', 'G', 'N', 'del']
# d.loc[[8716158,8720849]].T
def processSyncFileLine(x,dialellic=True):
z = x.apply(lambda xx: pd.Series(xx.split(':'), index=['A', 'T', 'C', 'G', 'N', 'del'])).astype(float).iloc[:, :4]
ref = x.name[-1]
alt = z.sum().sort_values()[-2:]
alt = alt[(alt.index != ref)].index[0]
if dialellic: ## Alternate allele is everything except the reference
return pd.concat([z[ref].astype(int).rename('C'), (z.sum(1)).rename('D')], axis=1).stack()
else: ## Alternate allele is the allele with the most reads
return pd.concat([z[ref].astype(int).rename('C'), (z[ref] + z[alt]).rename('D')], axis=1).stack()
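# --- Illustrative example (assumption, not part of the original script) ---
# processSyncFileLine expects a row of 'A:T:C:G:N:del' count strings whose
# index name ends with the reference base. A hypothetical row with reference
# 'A' and two samples would yield per-sample (C, D) = (ref count, depth):
#
#     row = pd.Series(['10:0:2:0:0:0', '8:0:4:0:0:0'],
#                     index=['s1', 's2'], name=('2L', 12345, 'A'))
#     processSyncFileLine(row)   # C=10, D=12 for s1; C=8, D=12 for s2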
def createF37VCF():
a = pd.read_csv(utl.outpath + 'chris/BF37.vcf', sep='\t', header=1)
a = a.set_index('#CHROM').loc[['2L', '2R', '3L', '3R', 'X', '4']].reset_index()
a.sort_values(['#CHROM', 'POS']).to_csv(utl.outpath + 'real/SNPs.vcf', sep='\t', index=None)
def computeF59():
# Basically
# BR1,FxR5 = R0
# BR3,FxR1 = R2
# BR2,FxR4 = R1
# BR1, BR3, BR2,F15R4,F15R5,F23R1,F27R5,F37R4,F37R5,F37R1==
# F0R0,F0R2,F0R1,F15R1,F15R0,F23R2,F27R0,F37R1,F37R0,F37R2
try:
a = pd.read_pickle(utl.outpath + 'real/mpileup.sync')
except:
f = lambda x: pd.read_csv(utl.dataPathDmel + 'AlternatingTemperatures/F37/BF{}'.format(x), sep='\t', header=None)[range(13)]
f37 = pd.concat([f(15), f(37)]).drop_duplicates(subset=[0, 1]).set_index([0, 1]).sort_index()
f37.index.names = ['CHROM', 'POS']
chrom='X'
def f(chrom):
print chrom
a = pd.read_csv(utl.dataPathDmel + 'AlternatingTemperatures/F59/F59.{}.sync'.format(chrom), sep='\t', header=None)
#samtools mpileup -r $chr -d 8000 -B -f $ref $bam1 $bam4 $bam5 > F59.$chr.mpileup &
# samtools columns are bam1, bam4, bam5; map them to replicates 2, 1, 0.
a.columns = ['CHROM', 'POS', 'REF', 2, 1, 0]
a.CHROM = a.CHROM.astype(str)
a = a.set_index(['CHROM', 'POS']).sort_index()
return a.loc[f37[f37.index.get_level_values('CHROM') == chrom].index].set_index('REF', append=True)
a = pd.concat(map(f, "2L 2LHet 2R 2RHet 3L 3LHet 3R 3RHet 4 X XHet YHet".split()))
a.to_pickle(utl.outpath + 'real/mpileup.sync')
print 'Computing F59...'
a.columns = pd.MultiIndex.from_product([a.columns, [59]], names=['REP', 'GEN'])
pool = Pool(7)
a = pd.concat(pool.map(oneBatch, utl.batch(a, 2000)))
pool.terminate()
gc.collect()
# a = a.apply(processSyncFileLine, axis=1).sort_index(axis=1).reset_index('REF').drop(('REF', '', ''), axis=1)
a.to_pickle(utl.outpath + 'real/CD.F59.df')
a = pd.read_pickle(utl.outpath + 'real/CD.F59.df')
a = changeCtoAlternateAndDampZeroReads(a)
a = pd.concat([a, pd.read_pickle(utl.outpath + 'real/CD.F37.df')], axis=1).sort_index().sort_index(axis=1)
a.to_pickle(utl.outpath + 'real/CD.F59.df')
def loadChrisPvals():
a = pd.read_csv(utl.outpath + 'chris/BF37', sep='\t', header=None).iloc[:, [0, 1, -1]];
b = pd.read_csv(utl.outpath + 'chris/BF15', sep='\t', header=None).iloc[:, [0, 1, -1]]
df = pd.concat([a.set_index([0, 1]), b.set_index([0, 1])], axis=1)
df.index.names = ['CHROM', 'POS']
df.columns = ['BM', 'BF']
# df = df.reset_index()
# df = df[df.CHROM.apply(lambda x: 'Het' not in x)]
# df = df.set_index(['CHROM', 'POS']);
df.sortlevel(inplace=True)
df=-df.apply(np.log)
return df
def getReplicates(a,filter=False):
R = pd.Series([a[a.columns[map(lambda x: x[-2:] == 'R1', a.columns)]],
a[a.columns[map(lambda x: x[-2:] == 'R2' or x[-2:] == 'R4', a.columns)]],
a[a.columns[map(lambda x: x[-2:] == 'R3' or x[-2:] == 'R5', a.columns)]]])
def rename(df): df.columns = pd.Series(df.columns).apply(lambda x: int(x.split('R')[0][1:]) ).values
R.apply(rename)
if filter:
idx = ~(pd.concat(map(lambda x: x.iloc[:, -1] < x.iloc[:, 0], R), axis=1).sum(1) == 3)
idx.sum()
R = R.apply(lambda x: x[idx])
for i,r in R.iteritems(): r.columns=pd.MultiIndex.from_product([[i],r.columns],names=['REP','TIME'])
R=pd.concat([r for i,r in R.iteritems()],axis=1)
return R
def changeCtoAlternateAndDampZeroReads(a):
C = a.xs('C', level=2, axis=1).sort_index().sort_index(axis=1)
D = a.xs('D', level=2, axis=1).sort_index().sort_index(axis=1)
C = D - C
print (D == 0).sum()
if (D == 0).sum().sum():
C[D == 0] += 1
D[D == 0] += 2
print (D == 0).sum()
C.columns = pd.MultiIndex.from_tuples([x + ('C',) for x in C.columns], names=C.columns.names + ['READ'])
D.columns = pd.MultiIndex.from_tuples([x + ('D',) for x in D.columns], names=D.columns.names + ['READ'])
return pd.concat([C, D], axis=1).sort_index(axis=1).sort_index()
import gc
def oneBatch(x):
# print x.shape
y = x.apply(processSyncFileLine, axis=1).sort_index(axis=1).reset_index('REF').drop(('REF', '', ''), axis=1)
print '.',
sys.stdout.flush()
gc.collect()
return y
def computeF37():
print 'Computing F37...'
# Basically
# BR1,FxR5 = R0
# BR3,FxR1 = R2
# BR2,FxR4 = R1
# " BR1, BR3, BR2,F15R4,F15R5,F23R1,F27R5,F37R4,F37R5,F37R1"
cols = "F0R0,F0R2,F0R1,F15R1,F15R0,F23R2,F27R0,F37R1,F37R0,F37R2".split(',')
f = lambda x: pd.read_csv(utl.outpath + 'chris/BF{}'.format(x), sep='\t', header=None)[range(13)]
a = pd.concat([f(15), f(37)]).drop_duplicates(subset=[0, 1]);
print a.shape
a.columns = ['CHROM', 'POS', 'REF'] + cols;
a = a.set_index(['CHROM', 'POS', 'REF']).sort_index()
colsMultiIndex = pd.Series(a.columns).apply(lambda x: (int(x[1:].split('R')[1]), int(x[1:].split('R')[0]))).values
a.columns = pd.MultiIndex.from_tuples(colsMultiIndex, names=['REP', 'GEN'])
# a = pd.concat(map(lambda x: x.apply(processSyncFileLine, axis=1).sort_index(axis=1).reset_index('REF').drop(('REF', '', ''), axis=1) , utl.batch(a, 1000)))
pool = Pool(7)
a = pd.concat(pool.map(oneBatch, utl.batch(a, 2000)))
pool.terminate()
# a = a.apply(processSyncFileLine, axis=1).sort_index(axis=1).reset_index('REF').drop(('REF', '', ''), axis=1)
changeCtoAlternateAndDampZeroReads(a).to_pickle(utl.home + 'out/real/CD.F37.df')
def getBaseFreq():
d = pd.read_pickle(utl.outpath + 'real/D.F59.df');
d = d.loc[:, d.columns.get_level_values('TIME') == 0]
c = pd.read_pickle(utl.outpath + 'real/C.F59.df');
c = c.loc[:, c.columns.get_level_values('TIME') == 0];
c = d - c
return c.sum(1) / d.sum(1)
|
mit
|
mayblue9/bokeh
|
bokeh/charts/tests/test_data_adapter.py
|
37
|
3285
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect data adapter index to be the same as the underlying pandas
# object and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
|
bsd-3-clause
|