repo_name | path | copies | size | content | license
---|---|---|---|---|---|
samuel1208/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
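# Note (added comment): the scaling above uses the sample standard deviation
# (ddof=1), and constant columns get a std of 1.0 so they are centered but
# left unscaled; x_std and y_std are returned so callers can undo the scaling.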
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor
parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS
algorithm, or (b) an SVD on the residual cross-covariance matrices.
Parameters
----------
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# A possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximation matrix which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Target vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Target vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2, or
PLS1 in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
Tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimize:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly
implement the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver.
# Otherwise, use arpack to compute only the components of interest.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Target vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
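# (Added note:) the expected costs follow from the optimal assignments, e.g.
# for the first square matrix the minimum-cost matching picks 150 (row 0,
# col 1), 400 (row 1, col 0) and 300 (row 2, col 2), giving
# 150 + 400 + 300 = 850.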
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
walkevin/ParallelTopologicalSorting | measurements/plotscripts/addline.py | 1 | 4582 | import numpy as np
import matplotlib.pyplot as plt
import glob
import re
import helper
plt.style.use('ggplot')
###############################################################################
# Absolute Timing
###############################################################################
def addAbsTiming(axis, algorithm, optimistic, size, graphtype='SOFTWARE', hostnamelike='e%',colorindex=0,markertype='D-',linelabel='nolabel', additionalwhere=''):
fixedwhere = "enable_analysis=0 AND debug=0 AND verbose=0 AND processors>=number_of_threads AND algorithm='{0}' AND optimistic={1} AND graph_type='{2}' AND hostname LIKE '{3}' AND graph_num_nodes={4} {5}".format(algorithm,optimistic,graphtype,hostnamelike,size,additionalwhere)
numthreads = helper.getData('number_of_threads', fixedwhere + ' GROUP BY number_of_threads')
avgtimings = []
if (np.size(numthreads)==0):
return
for nt in numthreads.flat:
# print "NUMTHREADS = ", nt
where = fixedwhere + ' AND number_of_threads={0}'.format(nt)
timings = helper.getData('total_time',where)
# Compute mean and stddev of first timing
if(len(avgtimings)==0):
mean, q25, q75 = helper.median_and_quantiles(data=timings)
print algorithm, " : ", mean, " , [", q25 , ",", q75, "]"
violin_parts = axis.violinplot(timings,[nt],widths=0.8)
for pc in violin_parts['bodies']:
pc.set_color(helper.getFGcolor(colorindex))
avgtimings.append(np.mean(timings))
axis.plot(numthreads,avgtimings[0]/numthreads,'--',color=helper.getBGcolor(colorindex)) # ideal scaling
axis.plot(numthreads,avgtimings,markertype,markersize=6,linewidth=1,color=helper.getFGcolor(colorindex),label=linelabel) # connecting dots
###############################################################################
# Strong Scaling
###############################################################################
def addStrongScaling(axis, algorithm, optimistic, size, graphtype='SOFTWARE', hostnamelike='e%',colorindex=0,markertype='D-',linelabel='nolabel',additionalwhere=''):
fixedwhere = "enable_analysis=0 AND debug=0 AND verbose=0 AND processors>=number_of_threads AND algorithm='{0}' AND optimistic={1} AND graph_type='{2}' AND hostname LIKE '{3}' AND graph_num_nodes={4} {5}".format(algorithm,optimistic,graphtype,hostnamelike,size,additionalwhere)
numthreads = helper.getData('number_of_threads', fixedwhere + ' GROUP BY number_of_threads')
avgtimings = []
if (np.size(numthreads)==0):
return
for nt in numthreads.flat:
# print "NUMTHREADS = ", nt
where = fixedwhere + ' AND number_of_threads={0}'.format(nt)
timings = helper.getData('total_time',where)
# Compute mean and stddev of first timing
if(len(avgtimings)==0):
mean, q25, q75 = helper.median_and_quantiles(data=timings)
print algorithm, " : ", mean, " , [", q25 , ",", q75, "]"
avgtimings.append(np.mean(timings))
speedups = avgtimings[0]/timings
violin_parts = axis.violinplot(speedups,[nt],widths=0.8)
for pc in violin_parts['bodies']:
pc.set_color(helper.getFGcolor(colorindex))
speedup = avgtimings[0]/avgtimings
axis.plot(numthreads,speedup,markertype,markersize=6,linewidth=1,color=helper.getFGcolor(colorindex),label=linelabel) # connecting dots
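# Usage sketch (added for illustration; the algorithm name, size and labels
# below are made-up placeholders, not values taken from the measurement
# database):
#   fig, ax = plt.subplots()
#   addStrongScaling(ax, algorithm='locking', optimistic=0, size=100000,
#                    colorindex=1, linelabel='locking, 100k nodes')
#   ax.legend()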
def addWeakScaling(axis, algorithm, optimistic, size, graphtype='SOFTWARE', hostnamelike='e%',colorindex=0,markertype='D-',linelabel='nolabel'):
fixedwhere = "enable_analysis=0 AND debug=0 AND verbose=0 AND processors>=number_of_threads AND algorithm='{0}' AND optimistic={1} AND graph_type='{2}' AND hostname LIKE '{3}' AND graph_num_nodes={4}*number_of_threads".format(algorithm,optimistic,graphtype,hostnamelike,size)
numthreads = helper.getData('number_of_threads', fixedwhere + ' GROUP BY number_of_threads')
# print(numthreads)
avgtimings = []
if (np.size(numthreads)==0):
return
for nt in numthreads.flat:
# print "NUMTHREADS = ", nt
where = fixedwhere + ' AND number_of_threads={0}'.format(nt)
timings = helper.getData('total_time',where)
# Compute mean and stddev of first timing
if(len(avgtimings)==0):
mean, q25, q75 = helper.median_and_quantiles(data=timings)
print algorithm, " : ", mean, " , [", q25 , ",", q75, "]"
avgtimings.append(np.mean(timings))
speedups = avgtimings[0]/timings
violin_parts = axis.violinplot(speedups,[nt],widths=0.8)
for pc in violin_parts['bodies']:
pc.set_color(helper.getFGcolor(colorindex))
speedup = avgtimings[0]/avgtimings
axis.plot(numthreads,speedup,markertype,markersize=6,linewidth=1,color=helper.getFGcolor(colorindex),label=linelabel) # connecting dots
| mit |
gldmt-duke/CokerAmitaiSGHMC | report/logistic_regression_simulated.py | 1 | 3249 |
import numpy as np
import matplotlib.pyplot as plt
import sghmc
import timeit
import pandas as pd
# Create data
n = 500
p = 50
beta = np.random.normal(0, 1, p+1)
Sigma = np.zeros((p, p))
Sigma_diags = np.array([25, 5, 0.2**2])
distribution = np.random.multinomial(p, pvals=[.05, .05, .9], size=1).tolist()
np.fill_diagonal(Sigma, np.repeat(Sigma_diags, distribution[0], axis=0))
X = np.random.multivariate_normal(np.zeros(p), Sigma, n)
X = np.hstack((np.ones((n, 1)), X))
p = np.exp(X @ beta)/(1 + np.exp(X @ beta))
Y = np.random.binomial(1, p, n)
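# (Added comment:) the intended data-generating model is a standard logistic
# model, Y_i ~ Bernoulli(p_i) with p_i = exp(x_i' beta) / (1 + exp(x_i' beta)).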
# Scale data
Xs = (X - np.mean(X, axis=0))/np.concatenate((np.ones(1),np.std(X[:,1:], axis=0)))
Xs = Xs[:,1:]
p = Xs.shape[1]
# ### Regression
from sklearn.linear_model import LogisticRegression
# Unscaled
mod_logis = LogisticRegression(fit_intercept=False, C=1e50)
mod_logis.fit(X,Y)
beta_true_unscale = mod_logis.coef_.ravel()
beta_true_unscale
# In[6]:
# Scaled
mod_logis = LogisticRegression(fit_intercept=False, C=1e50)
mod_logis.fit(Xs,Y)
beta_true_scale = mod_logis.coef_.ravel()
beta_true_scale
# ### HMC
# HMC - Scaled
nsample = 10000
m = 20
eps = .001
theta = np.zeros(p)
#theta = beta_true_unscale.copy()
phi = 5
M = np.identity(p)
samples, accept, rho, H = sghmc.run_hmc(Y, Xs, sghmc.U_logistic, sghmc.gradU_logistic, M, eps, m, theta, phi, nsample)
beta_est_hmc = np.mean(samples, axis=0)
beta_est_hmc - beta_true_scale
fig, ax = plt.subplots(figsize=(4,3))
plt.plot(samples[:,0])
plt.tight_layout()
ax.set_title("Trace of First Coefficient")
ax.set_xlabel("Index of samples")
plt.savefig('hmc-trace-sim.pdf')
fig, ax = plt.subplots(figsize=(4,3))
ax.plot(H)
ax.set_title("Total energy")
ax.set_xlabel("Number of samples")
plt.tight_layout()
plt.savefig('hmc-energy-sim.pdf')
# ### SGHMC
# SGHMC - Scaled (no intercept)
nsample = 10000
m = 20
eps = .002
theta = np.zeros(p)
#theta = beta_true_scale.copy()
phi = 5
nbatch = 500
C = 1 * np.identity(p)
V = 0 * np.identity(p)
M = np.identity(p)
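# (Added comment, interpreting the arguments in terms of the SGHMC
# formulation of Chen, Fox and Guestrin (2014) rather than the sghmc module's
# own documentation:) C is the user-chosen friction matrix, V an estimate of
# the gradient-noise covariance, M the mass matrix, eps the step size, m the
# number of leapfrog steps per sample, and nbatch the minibatch size used for
# the stochastic gradient.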
samples_sghmc, H_sghmc = sghmc.run_sghmc(Y, Xs, sghmc.U_logistic, sghmc.stogradU_logistic, M, eps, m, theta, C, V, phi, nsample, nbatch)
beta_est_sghmc = np.mean(samples_sghmc, axis=0)
np.mean(samples_sghmc, axis=0) - beta_true_scale
fig, ax = plt.subplots(figsize=(4,3))
plt.plot(samples_sghmc[:,0])
plt.tight_layout()
ax.set_title("Trace of First Coefficient")
ax.set_xlabel("Index of samples")
plt.savefig('sghmc-trace-sim.pdf')
fig, ax = plt.subplots(figsize=(4,3))
ax.plot(H_sghmc)
ax.set_title("Total energy")
ax.set_xlabel("Number of samples")
plt.tight_layout()
plt.savefig('sghmc-energy-sim.pdf')
# ### Gradient Descent
# Gradient descent - Scaled
np.random.seed(2)
phi = .1
beta_est_gd = sghmc.gd(Y, Xs, sghmc.gradU_logistic, .1, 10000, np.zeros(p), phi)
beta_est_gd - beta_true_scale
df = pd.DataFrame(np.vstack((beta_true_scale,
beta_est_hmc,
beta_est_sghmc,
beta_est_gd)).T,
columns=['MLE','HMC','SGHMC','GD'])
fig, ax = plt.subplots(figsize=(4,3))
plt.plot(df)
ax.set_title("Coefficient Estimates")
ax.set_xlabel("Coefficient")
plt.tight_layout()
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('coefs-sim.pdf')
| mit |
wzbozon/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
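# (Added note:) with degree=2, interaction_only=True and include_bias=True the
# retained columns are [1, x1, x2, x1*x2], i.e. indices [0, 1, 2, 4] of the
# full expansion [1, x1, x2, x1**2, x1*x2, x2**2] built in P2 above.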
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is taken because of its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
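# (Added note:) the third column follows from the training range [-0.1, 1.1]:
# (0.0 + 0.1) / 1.2 = 0.083 and (1.5 + 0.1) / 1.2 = 1.333; zero-range columns
# fall back to a unit scale, so new values are only shifted by the training
# minimum, consistent with the expected first two columns above.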
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
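# (Added note:) each column is divided by the maximum absolute value seen at
# fit time: e.g. the third column is scaled by 100 (0.5 -> 0.005) and the
# fourth by 2 (-1.0 -> -0.5, -2.0 -> -1.0).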
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
robblack007/clase-dinamica-robot | Practicas/practica1/robots/simuladores.py | 2 | 1319 | def simulador(puerto_zmq, f, x0, dt):
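    """Integrate dx/dt = f(t, x) from the initial state x0 in (approximately)
    real time with step dt, publish the first two state variables (converted
    to degrees) over a ZMQ PUB socket bound to tcp://*:puerto_zmq, and
    live-plot the most recent samples with matplotlib. Runs until interrupted
    with Ctrl-C, then returns the accumulated (ts, ys) lists.
    """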
from scipy.integrate import ode
from zmq import Context, PUB
from msgpack import packb
from matplotlib.pyplot import figure
from time import time, sleep
from numpy import sin, pi, degrees, array
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(señales):
socket.send(packb(señales))
fig = figure(figsize=(6,3))
ax = fig.gca()
t0 = time()
ts = [0]
ys = [array(x0)]
sis = ode(f)
sis.set_initial_value(x0, t0)
while True:
try:
ys.append(degrees(sis.integrate(sis.t + dt)))
while time() - t0 - ts[-1] < dt - 0.0004:
sleep(dt*0.01)
ts.append(time() - t0)
mandar_mensaje(ys[-1].tolist()[0:2])
ax.clear()
if len(ys) > 100:
ax.plot(ts[-100:], ys[-100:])
#ax.text(0.05, 0.1,ts[-1]-ts[-2], transform=ax.transAxes)
else:
ax.plot(ts, ys)
#ax.text(0.05, 0.1,ts[-1]-ts[-2], transform=ax.transAxes)
fig.canvas.draw()
except KeyboardInterrupt:
break
return ts, ys | mit |
phdowling/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 35 | 15016 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
nesterione/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots first display what a K-means algorithm would yield
using three clusters. They then show the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (default is 10), the number of
times the algorithm is run with different centroid seeds is
reduced.
The next plot displays the result of using eight clusters, and
finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | H2O/ArchiveH2O/h2o/model/dim_reduction.py | 4 | 4936 | # -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.utils.shared_utils import can_use_pandas
from h2o.utils.compatibility import * # NOQA
from .model_base import ModelBase
from .metrics_base import * # NOQA
import h2o
class H2ODimReductionModel(ModelBase):
"""
Dimension reduction model, such as PCA or GLRM.
"""
def varimp(self, use_pandas=False):
"""
        Return the importance of components associated with a PCA model.
        use_pandas: ``bool`` (default: ``False``); if True, return the table as a pandas DataFrame.
"""
model = self._model_json["output"]
if "importance" in list(model.keys()) and model["importance"]:
vals = model["importance"].cell_values
header = model["importance"].col_header
if use_pandas and can_use_pandas():
import pandas
return pandas.DataFrame(vals, columns=header)
else:
return vals
else:
print("Warning: This model doesn't have importances of components.")
def num_iterations(self):
"""Get the number of iterations that it took to converge or reach max iterations."""
o = self._model_json["output"]
return o["model_summary"]["number_of_iterations"][0]
def objective(self):
"""Get the final value of the objective function."""
o = self._model_json["output"]
return o["model_summary"]["final_objective_value"][0]
def final_step(self):
"""Get the final step size for the model."""
o = self._model_json["output"]
return o["model_summary"]["final_step_size"][0]
def archetypes(self):
"""The archetypes (Y) of the GLRM model."""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def reconstruct(self, test_data, reverse_transform=False):
"""
Reconstruct the training data from the model and impute all missing values.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the reconstructed frame.
:returns: the approximate reconstruction of the training data.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"reconstruct_train": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def proj_archetypes(self, test_data, reverse_transform=False):
"""
Convert archetypes of the model into original feature space.
:param H2OFrame test_data: The dataset upon which the model was trained.
:param bool reverse_transform: Whether the transformation of the training data during model-building
should be reversed on the projected archetypes.
:returns: model archetypes projected back into the original training data's feature space.
"""
if test_data is None or test_data.nrow == 0: raise ValueError("Must specify test data")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"project_archetypes": True, "reverse_transform": reverse_transform})
return h2o.get_frame(j["model_metrics"][0]["predictions"]["frame_id"]["name"])
def screeplot(self, type="barplot", **kwargs):
"""
Produce the scree plot.
Library ``matplotlib`` is required for this function.
:param str type: either ``"barplot"`` or ``"lines"``.
"""
# check for matplotlib. exit if absent.
is_server = kwargs.pop("server")
if kwargs:
raise ValueError("Unknown arguments %s to screeplot()" % ", ".join(kwargs.keys()))
try:
import matplotlib
if is_server: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
variances = [s ** 2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(list(range(1, len(variances) + 1)))
if type == "barplot":
plt.bar(list(range(1, len(variances) + 1)), variances)
elif type == "lines":
plt.plot(list(range(1, len(variances) + 1)), variances, 'b--')
if not is_server: plt.show()
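# Hedged usage sketch, added for illustration only; it is not part of the
# original module. It assumes a reachable H2O cluster, that this h2o build
# ships `H2OPrincipalComponentAnalysisEstimator`, and that the CSV path below
# is a placeholder for real numeric data.
def _example_dim_reduction_usage():
    """Illustrative only: fit a small PCA model and query this class's API."""
    import h2o
    from h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator
    h2o.init()
    frame = h2o.import_file("path/to/numeric_data.csv")  # placeholder dataset
    model = H2OPrincipalComponentAnalysisEstimator(k=3)
    model.train(x=frame.names, training_frame=frame)
    print(model.varimp(use_pandas=True))          # importance of each component
    model.screeplot(type="barplot", server=True)  # screeplot() expects the 'server' kwarg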
| mit |
jbbskinny/sympy | sympy/plotting/plot_implicit.py | 83 | 14400 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fallback algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to request the fallback algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fallback
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#recursive call refinepixels which subdivides the intervals which are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
plot_implicit, by default, uses interval arithmetic to plot functions. If
the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
    grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with small line width.
Examples
========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
        Recursively expands the arguments of a Boolean function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
line_color = kwargs.pop('line_color', "blue")
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points, line_color)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
| bsd-3-clause |
rahulgayatri23/moose-core | python/rdesigneur/rmoogli.py | 1 | 5808 | #########################################################################
## rdesigneur0_4.py ---
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 2 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import matplotlib
import sys
import moose
hasMoogli = True
try:
from PyQt4 import QtGui
import moogli
import moogli.extensions.moose
app = QtGui.QApplication(sys.argv)
except Exception as e:
print( 'Warning: Moogli not found. All moogli calls will use dummy functions' )
hasMoogli = False
runtime = 0.0
moogliDt = 1.0
rotation = math.pi / 500.0
def getComptParent( obj ):
k = moose.element(obj)
while not k.isA[ "CompartmentBase" ]:
if k == moose.element( '/' ):
return k.path
k = moose.element( k.parent )
return k.path
#######################################################################
## Here we set up the callback functions for the viewer
def prelude( view ):
view.home()
view.pitch( math.pi / 2.0 )
view.zoom( 0.3 )
#network.groups["soma"].set( "color", moogli.colors.RED )
# This func is used for the first viewer, it has to handle advancing time.
def interlude( view ):
moose.start( moogliDt )
val = [ moose.getField( i, view.mooField, "double" ) * view.mooScale for i in view.mooObj ]
#print "LEN = ", len( val ), "field = ", view.mooField
view.mooGroup.set("color", val, view.mapper)
view.yaw( rotation )
#print moogliDt, len( val ), runtime
if moose.element("/clock").currentTime >= runtime:
view.stop()
# This func is used for later viewers, that don't handle advancing time.
def interlude2( view ):
val = [ moose.getField( i, view.mooField, "double" ) * view.mooScale for i in view.mooObj ]
view.mooGroup.set("color", val, view.mapper)
view.yaw( rotation )
if moose.element("/clock").currentTime >= runtime:
view.stop()
def postlude( view ):
view.rd.display()
def makeMoogli( rd, mooObj, moogliEntry, fieldInfo ):
if not hasMoogli:
return None
mooField = moogliEntry[3]
numMoogli = len( mooObj )
network = moogli.extensions.moose.read( path = rd.elecid.path, vertices=15)
#print len( network.groups["spine"].shapes )
#print len( network.groups["dendrite"].shapes )
#print len( network.groups["soma"].shapes )
#soma = network.groups["soma"].shapes[ '/model/elec/soma']
#print network.groups["soma"].shapes
soma = network.groups["soma"].shapes[ rd.elecid.path + '/soma[0]']
if ( mooField == 'n' or mooField == 'conc' ):
updateGroup = soma.subdivide( numMoogli )
displayObj = mooObj
else:
shell = moose.element( '/' )
displayObj = [i for i in mooObj if i != shell ]
cpa = [getComptParent( i ) for i in displayObj ]
updateGroup = moogli.Group( "update" )
updateShapes = [network.shapes[i] for i in cpa]
#print "########### Len( cpa, mooObj ) = ", len( cpa ), len( mooObj ), len( updateShapes )
updateGroup.attach_shapes( updateShapes )
normalizer = moogli.utilities.normalizer(
moogliEntry[5], moogliEntry[6],
clipleft =True,
clipright = True )
colormap = moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow)
mapper = moogli.utilities.mapper(colormap, normalizer)
viewer = moogli.Viewer("Viewer")
viewer.setWindowTitle( moogliEntry[4] )
if ( mooField == 'n' or mooField == 'conc' ):
viewer.attach_shapes( updateGroup.shapes.values())
viewer.detach_shape(soma)
else:
viewer.attach_shapes(network.shapes.values())
if len( rd.moogNames ) == 0:
view = moogli.View("main-view",
prelude=prelude,
interlude=interlude,
postlude = postlude)
else:
view = moogli.View("main-view",
prelude=prelude,
interlude=interlude2)
cb = moogli.widgets.ColorBar(id="cb",
title=fieldInfo[3],
text_color=moogli.colors.BLACK,
position=moogli.geometry.Vec3f(0.975, 0.5, 0.0),
size=moogli.geometry.Vec3f(0.30, 0.05, 0.0),
text_font="/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf",
orientation=math.pi / 2.0,
text_character_size=16,
label_formatting_precision=0,
colormap=moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow),
color_resolution=100,
scalar_range=moogli.geometry.Vec2f(
moogliEntry[5],
moogliEntry[6]))
view.attach_color_bar(cb)
view.rd = rd
view.mooObj = displayObj
view.mooGroup = updateGroup
view.mooField = mooField
view.mooScale = fieldInfo[2]
view.mapper = mapper
viewer.attach_view(view)
return viewer
def displayMoogli( rd, _dt, _runtime, _rotation ):
if not hasMoogli:
return None
global runtime
global moogliDt
global rotation
runtime = _runtime
moogliDt = _dt
rotation = _rotation
for i in rd.moogNames:
i.show()
i.start()
#viewer.showMaximized()
#viewer.show()
#viewer.start()
return app.exec_()
| gpl-3.0 |
hughdbrown/QSTK-nohist | src/qstkfeat/classes.py | 1 | 3233 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Nov 7, 2011
@author: John Cornwell
@contact: [email protected]
@summary: File containing various classification functions
'''
# 3rd Party Imports
import pandas as pand
import numpy as np
def class_fut_ret(d_data, i_lookforward=21, s_rel=None, b_use_open=False):
'''
@summary: Calculate classification, uses future returns
@param d_data: Dictionary of data to use
@param i_lookforward: Number of days to look in the future
    @param s_rel: Stock symbol that this should be relative to, usually $SPX.
@param b_use_open: If True, stock will be purchased at T+1 open, sold at
T+i_lookforward close
@return: DataFrame containing values
'''
df_close = d_data['close']
if b_use_open:
df_open = d_data['open']
# Class DataFrame will be 1:1, we can use the price as a template,
# need to copy values
df_ret = pand.DataFrame(index=df_close.index, columns=df_close.columns,
data=np.copy(df_close.values))
# If we want market relative, calculate those values now
if not s_rel is None:
#assert False, 'Use generic MR param instead,
# recognized by applyfeatures'
i_len = len(df_close[s_rel].index)
# Loop over time
for i in range(i_len):
# We either buy on todays close or tomorrows open
if b_use_open:
if i + 1 + i_lookforward >= i_len:
df_ret[s_rel][i] = float('nan')
continue
f_buy = df_open[s_rel][i + 1]
f_sell = df_open[s_rel][i + 1 + i_lookforward]
else:
if i + i_lookforward >= i_len:
df_ret[s_rel][i] = float('nan')
continue
f_buy = df_close[s_rel][i]
f_sell = df_close[s_rel][i + i_lookforward]
df_ret[s_rel][i] = (f_sell - f_buy) / f_buy
# Loop through stocks
for s_stock in df_close.columns:
# We have already done this stock
if s_stock == s_rel:
continue
i_len = len(df_close[s_stock].index)
# Loop over time
for i in range(i_len):
# We either buy on todays close or tomorrows open
if b_use_open:
if i + 1 + i_lookforward >= i_len:
df_ret[s_stock][i] = float('nan')
continue
f_buy = df_open[s_stock][i + 1]
f_sell = df_open[s_stock][i + 1 + i_lookforward]
else:
if i + i_lookforward >= i_len:
df_ret[s_stock][i] = float('nan')
continue
f_buy = df_close[s_stock][i]
f_sell = df_close[s_stock][i + i_lookforward]
df_ret[s_stock][i] = (f_sell - f_buy) / f_buy
# Make market relative
if not s_rel is None:
df_ret[s_stock][i] -= df_ret[s_rel][i]
return df_ret
if __name__ == '__main__':
pass
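# Hedged usage sketch (illustration only; not part of the original file).
# It assumes `d_data` is the usual QSTK dictionary of DataFrames keyed by
# 'open' and 'close', indexed by timestamp with one column per symbol:
#
#     d_data = {'open': df_open, 'close': df_close}
#     # 21-day future returns, relative to $SPX, bought at the next day's open:
#     df_labels = class_fut_ret(d_data, i_lookforward=21, s_rel='$SPX',
#                               b_use_open=True)
#     # Rows near the end of df_labels are NaN because no future price exists.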
| bsd-3-clause |
zscore/pavement_analysis | src/data_munging.py | 1 | 7987 | import functools
import numpy as np
import os
import pandas as pd
import pyproj
import requests
import rtree
import subprocess
src_dir = os.path.dirname(os.path.abspath(__file__)) + '/'
root_dir = src_dir + '../'
data_dir = root_dir + 'dat/'
def _proj_apply(row, proj):
x,y = proj(*row)
return pd.Series({'x':x, 'y':y})
#EPSG:3435
#NAD83 / Illinois East (ftUS) (Google it)
#Note that this this projection takes arguments in the form (lon, lat),
#presumably to agree with the (x, y) output
NAD83 = pyproj.Proj('+init=EPSG:3435')
chicago_bounding_box = {'upper_lat': 42.3098,
'lower_lat': 41.3273,
'left_lon': -88.3823,
'right_lon': -87.0941}
nyc_bounding_box = {'upper_lat': 41.340,
'lower_lat': 40.355,
'left_lon': -74.713,
'right_lon': -71.570}
kimball_bounding_box = {'upper_lat': 41.95,
'lower_lat': 41.90,
'left_lon': -87.75,
'right_lon': -87.70}
def filter_readings_by_bb(readings, bb):
"""Filters readings by start lat/lon contained in bounding box."""
to_keep = np.logical_and(np.logical_and(readings['start_lon'] >= bb['left_lon'],
readings['start_lon'] <= bb['right_lon']),
np.logical_and(readings['start_lat'] >= bb['lower_lat'],
readings['start_lat'] <= bb['upper_lat']))
return readings.ix[to_keep, :]
def filter_readings_to_chicago(readings):
return filter_readings_by_bb(readings, chicago_bounding_box)
def filter_readings_to_nyc(readings):
return filter_readings_by_bb(readings, nyc_bounding_box)
def add_routes_to_shapely():
"nothing"
def filter_to_good_readings(readings):
return readings.loc[get_good_readings(readings), :]
def get_good_readings(readings):
return np.logical_and.reduce((readings['gps_mph'] < 30,
readings['gps_mph'] > 4,
readings['total_readings'] > 90,
readings['total_readings'] < 110))
def calc_dist(*args):
"""I have no idea if Clark ='s ellipsoid is good for our purposes or not.
Accepts calc_dist(lon, lat, lon, lat) where they may be iterables or
single values."""
try:
args = [list(arg) for arg in args]
    except TypeError:
        pass  # scalar inputs can be passed straight through to Geod.inv
clark_geod = pyproj.Geod(ellps='clrk66')
az12, az21, dist = clark_geod.inv(*args)
return dist
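# Hedged usage sketch (illustration only; the coordinates are made up).
# calc_dist takes (lon1, lat1, lon2, lat2), either as scalars or as
# equal-length iterables, and returns the geodesic distance(s) in meters:
#
#     calc_dist(-87.63, 41.88, -87.62, 41.89)        # single float, in meters
#     calc_dist([-87.63, -87.70], [41.88, 41.92],
#               [-87.62, -87.71], [41.89, 41.93])    # array of two distances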
def _string_to_array(str_array):
if str_array.strip() == '---':
return pd.Series()
if str_array[:2] == '- ':
return pd.Series(float(str_array[2:]))
return np.array([float(i) for i in str_array[5:-1].split('\n- ')])
def get_nearest_street(lat, lon):
osrm_api = 'http://router.project-osrm.org/nearest?loc='
nearest_street_resp = requests.get(osrm_api + str(lat) + ',' + str(lon))
    return nearest_street_resp.json()['name']
def read_raw_data():
"""Returns rides and readings Pandas dfs"""
rides = pd.read_csv('../dat/rides.csv')
readings = pd.read_csv('../dat/readings.csv')
readings.sort_values(by=['ride_id', 'id'], inplace=True)
return (rides, readings)
def add_proj_to_readings(readings, proj):
proj_apply = functools.partial(_proj_apply, proj=proj)
start_xy = readings.loc[:, ['start_lon', 'start_lat']].apply(proj_apply, axis=1)
end_xy = readings.loc[:, ['end_lon', 'end_lat']].apply(proj_apply, axis=1)
start_xy.columns = ('start_x', 'start_y')
end_xy.columns = ('end_x', 'end_y')
readings = readings.join(start_xy)
readings = readings.join(end_xy)
return readings
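# Hedged usage sketch (illustration only): project raw GPS readings into the
# Illinois East plane defined by NAD83 above, then keep only well-behaved rows.
# It assumes the CSV files read by read_raw_data() are in place.
#
#     rides, readings = read_raw_data()
#     readings = clean_readings(readings)
#     readings = add_proj_to_readings(readings, NAD83)  # adds start_x/start_y/end_x/end_y
#     good = filter_to_good_readings(readings)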
to_total_mag = lambda x: [np.array([(x['num_accel_x'][i] ** 2 +
x['num_accel_y'][i] ** 2 +
x['num_accel_z'][i] ** 2) ** 0.5
for i in range(len(x['num_accel_x']))])]
def clean_readings(readings):
for axis in ['x', 'y', 'z']:
readings['num_accel_' + axis] = readings['acceleration_' + axis].apply(_string_to_array)
readings['abs_mean_' + axis] = readings['num_accel_' + axis].apply(lambda x: np.mean(np.abs(x)))
readings['std_' + axis] = readings['num_accel_' + axis].apply(np.std)
readings['std_total'] = (readings['std_x'] ** 2 + readings['std_y'] ** 2 + readings['std_z'] ** 2) ** 0.5
readings['duration'] = readings['end_time'] - readings['start_time']
readings['gps_dist'] = calc_dist(readings['start_lon'],
readings['start_lat'],
readings['end_lon'],
readings['end_lat'])
readings['num_accel_total'] = readings.apply(to_total_mag, axis=1)
readings['num_accel_total'] = readings['num_accel_total'].apply(lambda x: x[0])
readings['abs_mean_total'] = readings['num_accel_total'].apply(np.mean)
readings['gps_speed'] = readings['gps_dist'] / readings['duration']
readings['gps_mph'] = readings['gps_speed'] * 2.23694
readings['total_readings'] = readings['num_accel_x'].apply(lambda x: len(x))
readings['start_datetime'] = readings['start_time'].apply(pd.datetime.fromtimestamp)
readings['end_datetime'] = readings['end_time'].apply(pd.datetime.fromtimestamp)
readings['abs_mean_over_speed'] = readings['abs_mean_total'] / readings['gps_speed']
return readings
def pull_data_from_heroku():
#does not work for some reason
subprocess.call(src_dir + 'dump_data_to_csv.sh')
def pull_data_by_time_range():
"""Pulls data by time range"""
def update_data():
"""Archives old data and pulls new data. If data grows large,
only get the new stuff. Can organize this within folders."""
def coords_to_bb(coord0, coord1):
left = min(coord0[0], coord1[0])
right = max(coord0[0], coord1[0])
bottom = min(coord0[1], coord1[1])
top = max(coord0[1], coord1[1])
return (left, bottom, right, top)
def reading_to_bb(row):
left = min(row.start_x, row.end_x)
right = max(row.start_x, row.end_x)
bottom = min(row.start_y, row.end_y)
top = max(row.start_y, row.end_y)
return (left, bottom, right, top)
def area_of_bb(bb):
left, bottom, right, top = bb
return (abs(left - right) * abs(top - bottom))
def insert_readings_rtree(readings):
readings_idx = rtree.index.Index()
for index, reading in readings.iterrows():
readings_idx.insert(index, reading_to_bb(reading))
return readings_idx
def point_to_bb(x, y, side_length):
return [x - side_length / 2., y - side_length / 2.,
x + side_length / 2, y + side_length / 2.]
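# Hedged usage sketch (illustration only; the query point is made up).
# Build the R-tree once, then look up readings whose bounding boxes intersect
# a small box around a projected (x, y) point:
#
#     idx = insert_readings_rtree(readings)
#     query_bb = point_to_bb(355000.0, 1900000.0, side_length=50.0)
#     nearby = readings.loc[list(idx.intersection(query_bb))]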
def expand_bb(bb, exp_amt):
return [bb[0] - exp_amt, bb[1] - exp_amt,
bb[2] + exp_amt, bb[3] + exp_amt]
def calc_reading_diffs(reading0, reading1):
start0 = reading0[['start_x', 'start_y']].values
start1 = reading1[['start_x', 'start_y']].values
end0 = reading0[['end_x', 'end_y']].values
end1 = reading1[['end_x', 'end_y']].values
diff0 = np.linalg.norm(start0 - start1) + np.linalg.norm(end0 - end1)
diff1 = np.linalg.norm(start0 - end1) + np.linalg.norm(end0 - start1)
diff = min(diff0, diff1)
dist0 = np.linalg.norm(start0 - end0)
dist1 = np.linalg.norm(start1 - end1)
if dist0 == 0 or dist1 == 0:
return np.inf
return diff / (dist0 + dist1)
def select_random_point(readings):
""" Selects a random reading and samples a point from
the segment as uniform(0,1) linearly interpolating from start
to end.
"""
an_idx = np.random.choice(readings.index, 1)
place_on_route = np.random.uniform()
return (an_idx,
(float(place_on_route * readings['start_x'][an_idx] +
(1 - place_on_route) * readings['end_x'][an_idx]),
float(place_on_route * readings['start_y'][an_idx] +
(1 - place_on_route) * readings['end_y'][an_idx])))
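# Hedged usage sketch (illustration only): sample a point lying on one of the
# recorded segments, e.g. to seed a nearest-segment search:
#
#     an_idx, (x, y) = select_random_point(filter_to_good_readings(readings))
#     search_bb = expand_bb(point_to_bb(x, y, side_length=10.0), exp_amt=20.0)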
if __name__ == '__main__':
"""does nothing""" | mit |
jreback/pandas | pandas/tests/window/test_api.py | 1 | 9525 | import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.base import SpecificationError
def test_getitem(frame):
r = frame.rolling(window=5)
tm.assert_index_equal(r._selected_obj.columns, frame.columns)
r = frame.rolling(window=5)[1]
assert r._selected_obj.name == frame.columns[1]
# technically this is allowed
r = frame.rolling(window=5)[1, 3]
tm.assert_index_equal(r._selected_obj.columns, frame.columns[[1, 3]])
r = frame.rolling(window=5)[[1, 3]]
tm.assert_index_equal(r._selected_obj.columns, frame.columns[[1, 3]])
def test_select_bad_cols():
df = DataFrame([[1, 2]], columns=["A", "B"])
g = df.rolling(window=5)
with pytest.raises(KeyError, match="Columns not found: 'C'"):
g[["C"]]
with pytest.raises(KeyError, match="^[^A]+$"):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[["A", "C"]]
def test_attribute_access():
df = DataFrame([[1, 2]], columns=["A", "B"])
r = df.rolling(window=5)
tm.assert_series_equal(r.A.sum(), r["A"].sum())
msg = "'Rolling' object has no attribute 'F'"
with pytest.raises(AttributeError, match=msg):
r.F
def tests_skip_nuisance():
df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
r = df.rolling(window=3)
result = r[["A", "B"]].sum()
expected = DataFrame(
{"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]},
columns=list("AB"),
)
tm.assert_frame_equal(result, expected)
def test_skip_sum_object_raises():
df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
r = df.rolling(window=3)
result = r.sum()
expected = DataFrame(
{"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]},
columns=list("AB"),
)
tm.assert_frame_equal(result, expected)
def test_agg():
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
a_mean = r["A"].mean()
a_std = r["A"].std()
a_sum = r["A"].sum()
b_mean = r["B"].mean()
b_std = r["B"].std()
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = MultiIndex.from_product([["A", "B"], ["mean", "std"]])
tm.assert_frame_equal(result, expected)
result = r.aggregate({"A": np.mean, "B": np.std})
expected = concat([a_mean, b_std], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({"A": ["mean", "std"]})
expected = concat([a_mean, a_std], axis=1)
expected.columns = MultiIndex.from_tuples([("A", "mean"), ("A", "std")])
tm.assert_frame_equal(result, expected)
result = r["A"].aggregate(["mean", "sum"])
expected = concat([a_mean, a_sum], axis=1)
expected.columns = ["mean", "sum"]
tm.assert_frame_equal(result, expected)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
# using a dict with renaming
r.aggregate({"A": {"mean": "mean", "sum": "sum"}})
with pytest.raises(SpecificationError, match=msg):
r.aggregate(
{"A": {"mean": "mean", "sum": "sum"}, "B": {"mean2": "mean", "sum2": "sum"}}
)
result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]})
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")]
expected.columns = MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_apply(raw):
# passed lambda
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
a_sum = r["A"].sum()
result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
rcustom = r["B"].apply(lambda x: np.std(x, ddof=1), raw=raw)
expected = concat([a_sum, rcustom], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_consistency():
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
expected = MultiIndex.from_product([list("AB"), ["sum", "mean"]])
tm.assert_index_equal(result, expected)
result = r["A"].agg([np.sum, np.mean]).columns
expected = Index(["sum", "mean"])
tm.assert_index_equal(result, expected)
result = r.agg({"A": [np.sum, np.mean]}).columns
expected = MultiIndex.from_tuples([("A", "sum"), ("A", "mean")])
tm.assert_index_equal(result, expected)
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
r = df.rolling(window=3)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}})
expected = concat(
[r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1
)
expected.columns = MultiIndex.from_tuples(
[("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
)
with pytest.raises(SpecificationError, match=msg):
r[["A", "B"]].agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}})
with pytest.raises(SpecificationError, match=msg):
r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}})
def test_count_nonnumeric_types():
# GH12541
cols = [
"int",
"float",
"string",
"datetime",
"timedelta",
"periods",
"fl_inf",
"fl_nan",
"str_nan",
"dt_nat",
"periods_nat",
]
dt_nat_col = [Timestamp("20170101"), Timestamp("20170203"), Timestamp(None)]
df = DataFrame(
{
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"datetime": date_range("20170101", periods=3),
"timedelta": timedelta_range("1 s", periods=3, freq="s"),
"periods": [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
],
"fl_inf": [1.0, 2.0, np.Inf],
"fl_nan": [1.0, 2.0, np.NaN],
"str_nan": ["aa", "bb", np.NaN],
"dt_nat": dt_nat_col,
"periods_nat": [
Period("2012-01"),
Period("2012-02"),
Period(None),
],
},
columns=cols,
)
expected = DataFrame(
{
"int": [1.0, 2.0, 2.0],
"float": [1.0, 2.0, 2.0],
"string": [1.0, 2.0, 2.0],
"datetime": [1.0, 2.0, 2.0],
"timedelta": [1.0, 2.0, 2.0],
"periods": [1.0, 2.0, 2.0],
"fl_inf": [1.0, 2.0, 2.0],
"fl_nan": [1.0, 2.0, 1.0],
"str_nan": [1.0, 2.0, 1.0],
"dt_nat": [1.0, 2.0, 1.0],
"periods_nat": [1.0, 2.0, 1.0],
},
columns=cols,
)
result = df.rolling(window=2, min_periods=0).count()
tm.assert_frame_equal(result, expected)
result = df.rolling(1, min_periods=0).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
def test_preserve_metadata():
# GH 10565
s = Series(np.arange(100), name="foo")
s2 = s.rolling(30).sum()
s3 = s.rolling(20).sum()
assert s2.name == "foo"
assert s3.name == "foo"
@pytest.mark.parametrize(
"func,window_size,expected_vals",
[
(
"rolling",
2,
[
[np.nan, np.nan, np.nan, np.nan],
[15.0, 20.0, 25.0, 20.0],
[25.0, 30.0, 35.0, 30.0],
[np.nan, np.nan, np.nan, np.nan],
[20.0, 30.0, 35.0, 30.0],
[35.0, 40.0, 60.0, 40.0],
[60.0, 80.0, 85.0, 80],
],
),
(
"expanding",
None,
[
[10.0, 10.0, 20.0, 20.0],
[15.0, 20.0, 25.0, 20.0],
[20.0, 30.0, 30.0, 20.0],
[10.0, 10.0, 30.0, 30.0],
[20.0, 30.0, 35.0, 30.0],
[26.666667, 40.0, 50.0, 30.0],
[40.0, 80.0, 60.0, 30.0],
],
),
],
)
def test_multiple_agg_funcs(func, window_size, expected_vals):
# GH 15072
df = DataFrame(
[
["A", 10, 20],
["A", 20, 30],
["A", 30, 40],
["B", 10, 30],
["B", 30, 40],
["B", 40, 80],
["B", 80, 90],
],
columns=["stock", "low", "high"],
)
f = getattr(df.groupby("stock"), func)
if window_size:
window = f(window_size)
else:
window = f()
index = MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)],
names=["stock", None],
)
columns = MultiIndex.from_tuples(
[("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")]
)
expected = DataFrame(expected_vals, index=index, columns=columns)
result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]})
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| mit |
Herpinemmanuel/Oceanography | Cas_1/Vorticity/A_Vorticity.py | 1 | 1379 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import xgcm
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from matplotlib.mlab import bivariate_normal
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
dir0 = '/homedata/bderembl/runmit/test_southatlgyre'
ds0 = open_mdsdataset(dir0,iters='all',prefix=['U','V'])
grid = xgcm.Grid(ds0)
print(grid)
Vorticity = (-grid.diff(ds0.U.where(ds0.hFacW>0)*ds0.dxC, 'Y') + grid.diff(ds0.V.where(ds0.hFacS>0)*ds0.dyC, 'X'))/ds0.rAz
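# Relative vorticity on the MITgcm (Arakawa C) grid: the circulation around
# each cell corner, -d(U*dxC)/dy + d(V*dyC)/dx, divided by the corner cell
# area rAz.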
print('Vorticity')
i = 0
nz = 0
while (i < 50) :
i=i+1
print(i)
plt.figure(1)
ax = plt.subplot(projection=ccrs.PlateCarree());
Vorticity[i,nz,:,:].plot.pcolormesh('XG','YG', ax=ax,vmin=-0.00020,vmax=0.00020,cmap='ocean')
plt.title('Case 1 : Vorticity')
plt.text(5,5,i,ha='center',wrap=True)
ax.coastlines()
gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
if (i < 10):
plt.savefig('Vorticity_cas1-'+'00'+str(i)+'.png')
plt.clf()
elif (i > 9) and (i < 100):
plt.savefig('Vorticity_cas1-'+'0'+str(i)+'.png')
plt.clf()
else:
plt.savefig('Vorticity_cas1-'+str(i)+'.png')
plt.clf()
| mit |
cybernet14/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalties are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
belltailjp/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from the Internet
# Choose a reasonably calm time period (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
mwv/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
gpiatkovska/Machine-Learning-in-Python | Ex7_Cluster_PCA/ex7_pca.py | 1 | 7487 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 16:44:28 2015
@author: Hanna
"""
import numpy as np
import scipy.io as io
import matplotlib.pyplot as plt
import scipy.misc as misc
from ex7 import kMeansInitCentroids, runkMeans
from mpl_toolkits.mplot3d import Axes3D
def featureNormalize(X):
mu = np.mean(X,axis=0)
X_norm = X-mu
sigma = np.std(X_norm,axis=0,ddof=1) # to get the same results as matlab
X_norm = X_norm/sigma
return X_norm, mu, sigma
def pca(X):
#covariance matrix
Sigma = np.dot(X.T,X)/np.shape(X)[0]
#singular value decomposition
U, s, V = np.linalg.svd(Sigma)
return U, s
def projectData(X, U, K):
return np.dot(X, U[:,:K])
def recoverData(Z, U, K):
return np.dot(Z, U[:,:K].T)
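# The columns of U returned by pca() are the principal directions (eigenvectors
# of the covariance matrix) and s holds the corresponding variances, so
# projectData() computes Z = X_norm * U[:, :K] and recoverData() approximates
# the data as X_rec = Z * U[:, :K].T, i.e. the projection onto the top-K
# principal subspace.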
def displayData(X):
pixels = np.sqrt(np.shape(X)[1]).astype(np.int) # images are pixels by pixels size
#images are shown on a display_rows by display_cols square
display_rows = np.sqrt(np.shape(X)[0]).astype(np.int)
display_cols = display_rows
#print(pixels,display_rows,display_cols)
out = np.zeros((pixels*display_rows,pixels*display_cols))
indices = range(0,display_rows*display_cols)
for j in range(0, display_rows):
for i in range(0, display_cols):
start_i = i*pixels
start_j = j*pixels
out[start_i:start_i+pixels, start_j:start_j+pixels] = X[indices[display_rows*j+i]].reshape(pixels, pixels).T
return out
if __name__ == '__main__':
#load and plot the example dataset
mat = io.loadmat("ex7data1.mat")
X = mat['X']
plt.figure()
plt.scatter(X[:,0], X[:,1], facecolors='none', edgecolors='b')
plt.xlim([0.5, 6.5])
plt.ylim([2, 8])
plt.axes().set_aspect('equal')
plt.savefig('ExampledataSet1.pdf')
#PCA
#normalize features
X_norm, mu, sigma = featureNormalize(X)
#run pca
U, s = pca(X_norm)
#print(np.shape(s))
#print(np.shape(U)) # n by n, n is # of features
#draw left singular vectors with lengths proportional to corresponding singular values
#centered at mean of data to show the directions of maximum variations in the dataset
plt.figure()
plt.scatter(X[:,0], X[:,1], facecolors='none', edgecolors='b', s=20, linewidths=1)
plt.plot([mu[0], mu[0] + 1.5*s[0]*U[0,0]],[mu[1], mu[1] + 1.5*s[0]*U[1,0]], color='k', linestyle='-', linewidth=2)
plt.plot([mu[0], mu[0] + 1.5*s[1]*U[0,1]],[mu[1], mu[1] + 1.5*s[1]*U[1,1]], color='k', linestyle='-', linewidth=2)
plt.xlim([0.5, 6.5])
plt.ylim([2, 8])
plt.axes().set_aspect('equal')
plt.savefig('PCAdataSet1.pdf')
print("left singular vector corresponding to largest singular value: ")
print(U[:,0])
print("should be about [-0.707 -0.707]") # gives [-0.70710678 -0.70710678]
#dimensionality reduction
#project data into 1D
K = 1
Z = projectData(X_norm, U, K)
print(Z[0], " should be about 1.481") # gives [ 1.48127391]
#approximately recover original 2D data
X_norm_rec = recoverData(Z, U, K)
print("recovered X_norm ", X_norm_rec[0], "should be about [-1.047 -1.047]") # gives [-1.04741883 -1.04741883]
print("original X_norm ", X_norm[0])
print("original X ", X[0])
#plot original data and projection
plt.figure()
plt.scatter(X_norm[:,0], X_norm[:,1], facecolors='none', edgecolors='b', s=20, linewidths=1)
plt.scatter(X_norm_rec[:,0], X_norm_rec[:,1], facecolors='none', edgecolors='r', s=20, linewidths=1)
plt.plot([X_norm[:,0], X_norm_rec[:,0]],[X_norm[:,1], X_norm_rec[:,1]], color='k', linestyle='--', linewidth=1)
plt.xlim([-4, 3])
plt.ylim([-4, 3])
plt.axes().set_aspect('equal')
plt.savefig('ProjectiondataSet1.pdf')
plt.show()
#faces dataset
#load data
mat = io.loadmat("ex7faces.mat")
X = mat['X']
#display first 100 faces
out = displayData(X[:100])
fig = plt.figure()
ax = fig.gca()
ax.imshow(out,cmap="Greys_r")
ax.set_axis_off()
ax.set_title("Faces dataset")
plt.savefig("100Faces.pdf")
#normalize features
X_norm, mu, sigma = featureNormalize(X)
#display first 100 normalized faces
out = displayData(X_norm[:100])
fig = plt.figure()
ax = fig.gca()
ax.imshow(out,cmap="Greys_r")
ax.set_axis_off()
ax.set_title("Normalized faces dataset")
plt.savefig("100FacesNorm.pdf")
    #looks less contrasted than the original
#run pca on faces
U, s = pca(X_norm)
#display 36 principal "eigenfaces"
out = displayData(U[:,:36].T)
fig = plt.figure()
ax = fig.gca()
ax.imshow(out,cmap="Greys_r")
ax.set_axis_off()
ax.set_title("Principal components of faces dataset")
plt.savefig("36Eigenfaces.pdf")
#reduce dimension to 100
K = 100
Z = projectData(X_norm, U, K)
#approximately recover data
X_norm_rec = recoverData(Z, U, K)
#display first 100 normalized original and recovered faces
fig = plt.figure(figsize=(10, 8), dpi=200) # make large figure so that individual faces and quality difference can be recognized
out = displayData(X_norm[:100])
out_rec = displayData(X_norm_rec[:100])
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.imshow(out,cmap="Greys_r")
ax.set_title("Original faces")
ax.set_axis_off()
ax2.imshow(out_rec,cmap="Greys_r")
ax2.set_title("Recovered faces")
ax2.set_axis_off()
plt.savefig("OriginalVsRecoveredFaces.pdf")
plt.show()
#PCA for visualizations
#load the image
A = misc.imread("bird_small.png")
#divide by 255 so that all values are in the range [0,1]
A = A/255.0
#reshape into 128*128 (# of pixels) by 3 (RGB intensities) matrix
X = A.reshape(np.shape(A)[0]*np.shape(A)[1], 3)
K_c = 16 # 16 clusters
max_iters = 10
#randomly initialize centroids
initial_centroids = kMeansInitCentroids(X, K_c)
#run K-means
centroids, idx = runkMeans(X, initial_centroids, max_iters)
#visualize centroid assignment (for the entire dataset)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], c = idx, marker='.')
plt.savefig("3DVisualization.pdf")
#normalize features
X_norm, mu, sigma = featureNormalize(X)
#to get the plot in the same bounds as in Fig 11 of assignment text
#need only subtract mean during normalization (no division by std)
#mu = np.mean(X,axis=0)
#X_norm = X-mu
#run pca
U, s = pca(X_norm)
#reduce dimension to 2D
K = 2
Z = projectData(X_norm, U, K)
#print(X[:10], np.max(X),np.min(X),np.mean(X),np.std(X))
#print(X_norm[:10], np.max(X_norm),np.min(X_norm),np.mean(X),np.std(X_norm))
#print(Z[:10], np.max(Z),np.min(Z),np.mean(X),np.std(Z))
#visualize centroid assignment for 2D data
fig = plt.figure()
ax = fig.gca()
ax.scatter(Z[:,0], Z[:,1], c = idx, marker='.')
#the plot is flipped wrt both axes compared to the one provided in the assignment text,
    #evidently we got negative singular vectors compared to the assignment text
#(which is OK since U and -U are equally valid)
#so using -U, and hence -Z gives the same plot as in the text
#ax.scatter(-Z[:,0], -Z[:,1], c = idx, marker='.')
plt.savefig("2DVisualization.pdf")
plt.show()
| mit |
RomainBrault/scikit-learn | examples/datasets/plot_iris_dataset.py | 36 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/multi_image.py | 12 | 2201 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, axes, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
class ImageFollower:
'update image in response to changes in clim or cmap on another image'
def __init__(self, follower):
self.follower = follower
def __call__(self, leader):
self.follower.set_cmap(leader.get_cmap())
self.follower.set_clim(leader.get_clim())
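# Each remaining image registers an ImageFollower on the master image's
# callbacksSM below, so a change to the master's clim or cmap propagates to
# every follower.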
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this interactively and
# modify the colormap:
axes(ax[0]) # Return the current axes to the first one,
sci(images[0]) # because the current image must be in current axes.
show()
| gpl-2.0 |
clemkoa/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 31 | 5994 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
# Check binary predict decision has also predicted probability above 0.5.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
# Test GPC for multi-class classification problems.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
# Test that multi-class GPC produces identical results with n_jobs>1.
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
| bsd-3-clause |
barajasr/Baseball-Reference-Plotting | Plot.py | 1 | 9927 | import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import Auxiliary as aux
import BrefScraper as brf
class Plot(object):
""" With data obtained from BrefScraper, Plot clean the raw
data and saves it to file.
"""
def __init__(self, scraper=brf.BrefScraper(), histogram=True):
""" Set defaults to be used later in cleaning and plotting
"""
self.scraper = scraper
self.histogram = histogram
# Axes limit hints to use
self.x_min = 1
self.x_max = 10
self.y_min = -24
self.y_max = 24
# Constants
self.major, self.minor = 6, 3
def _clean_data(self, data):
""" For given raw data, split it and fill in missing keys
with zeroes.
"""
x_max = self._max_key(data)
negative, positive = [], []
bi_keys = aux.LOSS, aux.WIN
for index in range(self.x_min, x_max+1):
negative.append(0)
positive.append(0)
if index in data[bi_keys[0]]:
negative[-1] = -data[bi_keys[0]][index]
if not self.histogram:
if index != 0 or self.x_min != 0:
negative[-1] *= index
if index in data[bi_keys[1]]:
positive[-1] = data[bi_keys[1]][index]
if not self.histogram:
if index != 0 or self.x_min != 0:
positive[-1] *= index
return aux.Data(negative, positive)
def _fit_y_axis(self, data):
""" Adjust Y-axis range to next minor tick if required.
"""
y_min, y_max = self.y_min, self.y_max
set_min = min(data.negative)
if set_min <= self.y_min:
y_min = set_min - (self.minor - set_min % self.minor)
set_max = max(data.positive)
if set_max >= self.y_max:
y_max = set_max + (self.minor - set_max % self.minor)
return aux.Axis(y_min, y_max)
def _max_key(self, data):
""" Return the max x-axis value found in keys.
"""
dict_max = max([key for sub_data in data.values()
for key in sub_data])
key_max = self.x_max
if dict_max > self.x_max:
key_max = dict_max
return key_max
def plot(self, plot_type, average):
""" Main point of entry. Set off scraper, process and plot data.
"""
        # Dict with the appropriate functions for data transformation, defined
        # at the bottom of the module.
(self.x_min, teams_raw, team_set, get_clean, to_plot) = OPTIONS[plot_type]
cumulative = aux.Data([], [])
for team, raw_data in teams_raw(self.scraper):
raw_set = team_set(raw_data)
data = get_clean(self, raw_set)
to_plot(self, team, data)
if average:
aux.aggragate_cumulative(cumulative, data)
if average:
aux.average_data(cumulative, len(self.scraper.teams))
to_plot(self, 'League Average', cumulative)
def _plot_outcome_conceding(self, team, data):
""" Sets the specific params for of win/loss outcome when team concedes
x runs.
"""
y_axis = self._fit_y_axis(data)
record = aux.outcome_record(data, self.histogram)
y_label = 'Wins/losses when conceding x runs' if self.histogram else\
'Total runs sorted by runs conceded per game'
tag_label = 'outcome_conceding_histogram' if self.histogram else\
'outcome_conceding_sorted'
self._plot_team(data,
record,
aux.Labels(team, 'Runs conceded', y_label, tag_label),
y_axis)
def _plot_outcome_scoring(self, team, data):
""" Sets the specific params for of win/loss outcome when team scores
x runs.
"""
y_axis = self._fit_y_axis(data)
record = aux.outcome_record(data, self.histogram)
y_label = 'Wins/losses when scoring x runs' if self.histogram else\
'Total runs sorted by runs scored per game'
tag_label = 'outcome_scoring_histogram' if self.histogram else\
'outcome_scoring_sorted'
self._plot_team(data,
record,
aux.Labels(team, 'Runs scored', y_label, tag_label),
y_axis)
def _plot_team(self, data, record, labels, y_axis):
""" Generic plotting for data found on the team's schedule and results
page.
"""
net = [n + m for m, n in zip(data.negative, data.positive)]
fig = plt.figure()
plt.xlabel(labels.x)
plt.ylabel(labels.y)
        # record turned into string to handle the int/float possibility
if isinstance(record.wins, int):
plt.title('{} ({}-{}) - {}'\
.format(labels.team, record.wins, record.losses, self.scraper.year))
else:
plt.title('{} ({:.2f}-{:.2f}) - {}'\
.format(labels.team, record.wins, record.losses, self.scraper.year))
x_max = len(data.negative) + 1 if self.x_min == 1 else len(data.negative)
plt.axis([self.x_min, x_max, y_axis.min, y_axis.max])
ax = plt.subplot()
ax.set_xticks(np.arange(1, x_max, 1))
major_locator = ticker.MultipleLocator(self.major)
major_formatter = ticker.FormatStrFormatter('%d')
minor_locator = ticker.MultipleLocator(self.minor)
ax.yaxis.set_major_locator(major_locator)
ax.yaxis.set_major_formatter(major_formatter)
ax.yaxis.set_minor_locator(minor_locator)
x_axis = range(self.x_min, x_max)
ax.bar(x_axis, data.negative, width=0.96, color='r', edgecolor=None, linewidth=0)
ax.bar(x_axis, data.positive, width=0.96, color='b', edgecolor=None, linewidth=0)
ax.bar(x_axis, net, width=0.96, color='g', edgecolor=None, linewidth=0, label='Net')
plt.axhline(0, color='black')
plt.grid(which='both')
ax.grid(which='minor', alpha=0.5)
ax.grid(which='major', alpha=0.9)
legend = ax.legend(loc='best')
frame = legend.get_frame()
frame.set_facecolor('0.90')
self._save(labels.team, labels.tag)
def _plot_win_loss_margins(self, team, data):
""" Sets the specific params for margins of win/loss plot.
"""
y_axis = self._fit_y_axis(data)
wins = sum(data.positive) if self.histogram else\
sum([runs // (margin + 1) \
for margin, runs in enumerate(data.positive)])
losses = -sum(data.negative) if self.histogram else\
-sum([runs // (margin + 1) \
for margin, runs in enumerate(data.negative)])
y_label = '# of times won/loss by margin' if self.histogram else\
'Total Runs sorted by margin'
tag_label = 'margin_histogram' if self.histogram else 'margin_sorted'
self._plot_team(data,
aux.Record(wins, losses),
aux.Labels(team, 'Margin of win/loss', y_label, tag_label),
y_axis)
def _plot_win_loss_streaks(self, team, data):
""" Sets the specific params for win/loss streaks plot.
"""
y_axis = self._fit_y_axis(data)
wins = sum([(m + 1) * n for m, n in enumerate(data.positive)]) \
if self.histogram else sum(data.positive)
losses = -sum([(m + 1) * n for m, n in enumerate(data.negative)]) \
if self.histogram else -sum(data.negative)
y_label = '# of Streaks' if self.histogram else 'Win/Losses sorted by streak'
tag_label = 'streaks_histogram' if self.histogram else 'streaks_sorted'
self._plot_team(data,
aux.Record(wins, losses),
aux.Labels(team, 'Streak Length', y_label, tag_label),
y_axis)
def _save(self, filename, directory, ext='png', close=True, verbose=True):
""" Save the current plot to file.
"""
        # Build the save path under the year directory
year = self.scraper.year
path = year if directory == [] else os.path.join(year, directory)
if not os.path.exists(path):
os.makedirs(path)
savepath = os.path.join(path, filename + '.' + ext)
if verbose:
print("Saving figure to '{}'...".format(savepath))
plt.savefig(savepath)
if close:
plt.close()
def set_default_axes(self, x_min=1, x_max=10, y_min=-24, y_max=24):
""" Adjust default axes range.
"""
self.x_min, self.x_max = x_min, x_max
self.y_min, self.y_max = y_min, y_max
# Data transformation and plotting chain
OPTIONS = {'outcome_conceding': [0,
brf.BrefScraper.game_scores,
aux.outcome_when_conceding,
Plot._clean_data,
Plot._plot_outcome_conceding],
'outcome_scoring': [0,
brf.BrefScraper.game_scores,
aux.outcome_when_scoring,
Plot._clean_data,
Plot._plot_outcome_scoring],
'win_loss_streaks' : [1,
brf.BrefScraper.wins_losses,
aux.count_wins_losses,
Plot._clean_data,
Plot._plot_win_loss_streaks],
'win_loss_margins' : [1,
brf.BrefScraper.game_scores,
aux.win_loss_margins,
Plot._clean_data,
Plot._plot_win_loss_margins]}
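# Minimal usage sketch (commented out so importing this module has no side
# effects; it assumes BrefScraper's defaults point at a valid team list/year):
#
#   plotter = Plot(histogram=True)
#   plotter.plot('win_loss_margins', average=False)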
| bsd-2-clause |
Charley-fan/metaArray | example.py | 1 | 30767 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from os.path import join
from os import linesep
from textwrap import TextWrapper
import cStringIO
from misc import filePath, dirPath
# Enviromental variables
demo_dir = join(filePath(__file__).baseDir, 'example')
tty_width = 72
partition = '-' * tty_width
prompt = '>>> '
# Current dir
# current_dir = dirPath('./')
wrapper = TextWrapper()
wrapper.width = tty_width
wrapper.replace_whitespace = False
# wrapper.drop_whitespace = False
wrapper.initial_indent = "- "
wrapper.subsequent_indent = '- '
comment_wrapper = TextWrapper()
comment_wrapper.replace_whitespace = False
# comment_wrapper.drop_whitespace = False
comment_wrapper.width = tty_width
comment_wrapper.subsequent_indent = '# '
class demo_menu:
"""
Menu object
Contains a list of menu options, as well as the parent menu if exist.
"""
def __init__(self, title = '', parent = None):
self.title = title
self.parent = parent
self.info = ''
self.items = {}
return
def add_item(self, obj):
obj.parent = self
self.items[obj.title] = obj
return
def __call__(self):
print(partition)
if self.parent is not None:
title = self.parent.title + ' - ' + self.title
else:
title = self.title
print(title.center(tty_width))
print(partition)
print(wrapper.fill(self.info))
print(partition)
lst = self.items.keys()
lst.sort()
print('\tOption\tDescription')
for i in range(len(lst)):
print('\t' + str(i).rjust(6) + '\t' + self.items[lst[i]].title)
# Present the return option if parent menu exists
i += 1
return_option = i
if self.parent is not None:
print('\t' + str(i).rjust(6) + '\tReturn to: ' + self.parent.title)
i += 1
print('\t' + str(i).rjust(6) + '\tQuit')
quit_option = i
print(partition)
while True:
option = raw_input("Which option would you like to select? ")
try:
option = int(option)
except:
continue
if option == quit_option:
return
if option == return_option:
print(partition + linesep + linesep)
return self.parent()
if option >= 0 and option < len(lst):
item = self.items[lst[option]]
if isinstance(item, demo_item):
print(linesep + linesep + partition)
print(wrapper.fill(item.info))
print(partition + linesep + linesep)
return item()
continue
return
class demo_item:
"""
Menu item
    Contains a pointer to the menu item's action.
"""
def __init__(self, title = '', parent = None, exe = None):
self.title = title
self.parent = parent
self.info = ''
self.exe = exe
return
def __call__(self):
summary = self.exe()
if summary is not None:
print(linesep + ' demo summary '.center(tty_width, '-') + linesep)
print(summary)
else:
print(linesep + linesep)
print(' END of demo '.center(tty_width, '-') + linesep)
if self.parent is not None:
print(linesep + linesep)
return self.parent()
return
#def prcs_demo_lst(lst, summary = ''):
#"""
#lst = []
#lst.append("fig, ax = plot1d(ary, legend=-1)")
#lst.append("# fig.savefig('plot1d.png', dpi=200, format='png')")
#lst.append("show()")
#lst.append("close(fig)")
#"""
#for st in lst:
#summary += st + linesep
#if st == '':
#print(linesep)
#elif st[0] == '#':
#print(comment_wrapper.fill(st))
#else:
#print(prompt + st)
#exec(st)
#return summary
def prcs_demo(code, summary = ''):
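    """ Print and execute the given block of demo code line by line, echoing
    comments and statements as an interactive session would, and append each
    line to the returned summary string.
    """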
# Fill the code block into IO buffer
str_buff = cStringIO.StringIO()
str_buff.write(code.strip())
str_buff.seek(0)
for st in str_buff:
st = st.strip()
summary += st + linesep
if st == '':
print(linesep)
elif st[0] == '#':
print(comment_wrapper.fill(st))
else:
print(prompt + st)
exec(st)
#try: exec(st)
#except: return
str_buff.close()
return summary
#
# Begin example codes
########################
main_menu = demo_menu(title = 'metaArray demos')
main_menu.info = 'This is a list of demos to illustrate the usage of metaArray.'
###############################################################################
# I/O demos
########################
drv_menu = demo_menu(title = 'File I/O demos')
drv_menu.info = 'This is a list of demos to illustrate the usage of file I/O with metaArray.'
####################################
## Example on Tek isf file reader
####################################
def isf_demo():
code = """
from metaArray.drv_Tek import isf
f = isf('""" + join(demo_dir,'DPO4000B.isf') + """', debug = True)
# Have a look at the file content
#*********************************
print(f)
# Load it into metaArray
#************************
ary = f[0]
# See what the metaArray looks like
print(ary)
# Here is a plot of the contents
#********************************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_isf.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_isf = demo_item(title = 'Read Tektronix isf file.', exe = isf_demo)
demo_isf.info = 'This demo will illustrate the usage of the Tek isf file interpreter'
drv_menu.add_item(demo_isf)
####################################
## Example on DPO2000 csv file reader
####################################
def DPO2000_csv_demo():
code = """
from metaArray.drv_Tek import DPO2000_csv
f = DPO2000_csv('""" + join(demo_dir,'DPO2000.csv') + """', debug = True)
# Have a look at the file content
#*********************************
print(f)
# Load it into metaArray
#************************
ary = f()
# See what the metaArray looks like
print(ary)
# Here is a plot of the contents
#********************************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_DPO2000_csv.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_DPO2000_csv = demo_item(title = 'Read Tektronix DPO2000 csv file.', exe = DPO2000_csv_demo)
demo_DPO2000_csv.info = 'This demo will illustrate the usage of the Tek DPO2000 series csv file interpreter'
drv_menu.add_item(demo_DPO2000_csv)
####################################
## Example on TDS2000 csv file reader
####################################
def TDS2000_csv_demo():
code = """
from metaArray.drv_Tek import TDS2000_csv
f = TDS2000_csv('""" + join(demo_dir,'TDS2000.csv') + """', debug = True)
# Have a look at the file content
#*********************************
print(f)
# Load it into metaArray
#************************
ary = f()
# See what the metaArray looks like
#***********************************
print(ary)
# Here is a plot of the contents
#********************************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_TDS2000_csv.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_TDS2000_csv = demo_item(title = 'Read Tektronix TDS2000 csv file.', exe = TDS2000_csv_demo)
demo_TDS2000_csv.info = 'This demo will illustrate the usage of Tek TDS2000 series csv file interpreter'
drv_menu.add_item(demo_TDS2000_csv)
####################################
## Example on PZFlex POUT file reader
####################################
#python26
#from os import chdir
#chdir("/usr/lib64/python2.6/site-packages/utlib/sample/")
#from utlib import pout_hist
#a = pout_hist("2D_Al_10_Avg.flxhst", debug = True)
#a
#ary = a[3]
#ary
#from matplotlib.pyplot import show
#from matplotlib.pyplot import close
#from utlib.drv_pylab import plot1d
#fig, ax = plot1d(ary, legend=-1)
#show()
#close(fig)
def pout_hist_demo():
code = """
from metaArray.drv_flex import pout_hist
f = pout_hist('""" + join(demo_dir,'june10j.flxhst') + """')
# Have a look at the file content
#*********************************
print(f)
# Load it into metaArray
#************************
ary = f[2]
ary['name'] = ary['june10j.desc']
ary.set_range(0, 'unit', 's')
# See what the metaArray looks like
#***********************************
print(ary)
# Here is a plot of the contents
#********************************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_pout_hist.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_pout_hist = demo_item(title = 'Read PZFlex flxhst file.', exe = pout_hist_demo)
demo_pout_hist.info = 'This demo will illustrate the usage of the PZFlex flxhst \
file interpreter. flxhst files are generated by invoking the POUT HIST command \
in the PZFlex input file.'
drv_menu.add_item(demo_pout_hist)
####################################
## Example on PZFlex data out1 file reader
####################################
def data_out1_demo():
code = """
from metaArray.drv_flex import data_out1
f = data_out1('""" + join(demo_dir,'3D_billet_10mm_40mm.flxdato') + """')
# Have a look at the file content
#*********************************
print(f)
# Load it into metaArray
#************************
from metaArray import metaArray
ary = f[27][:,:,0]
ary = metaArray(ary)
ary['name'] = '3D_billet_10mm_40mm'
ary['unit'] = ''
ary['label'] = 'x-velocity'
ary.set_range(0, 'begin', 0)
ary.set_range(0, 'end', 1651.0)
ary.set_range(0, 'unit', 'mm')
ary.set_range(0, 'label', 'x - Horizontal')
ary.set_range(1, 'begin', 0)
ary.set_range(1, 'end', 226.0)
ary.set_range(1, 'unit', 'mm')
ary.set_range(1, 'label', 'y - Vertical')
# See what the metaArray looks like
#***********************************
print(ary)
# Here is a plot of the contents
#********************************
from metaArray.drv_pylab import plot2d
from matplotlib.pyplot import show, close
fig, ax = plot2d(ary)
# fig.savefig('plot2d.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_data_out1 = demo_item(title = 'Read PZFlex flxdato file.', exe = data_out1_demo)
demo_data_out1.info = 'This demo will illustrate the usage of the PZFlex flxdato \
file interpreter. flxdato files are generated by invoking the DATA OUT1 command \
in the PZFlex input file.'
drv_menu.add_item(demo_data_out1)
main_menu.add_item(drv_menu)
################### END I/O demo ###################################
###############################################################################
###############################################################################
###############################################################################
# Plotting demos
########################
plot_menu = demo_menu(title = 'Plotting and visualisation demos')
plot_menu.info = 'This is a list of demos to illustrate the usage of metaArray-\
aware plotting and visualisation functions. The plotting routines are based on \
matplotlib.'
####################################
## Example on multiple 1D plot usage
####################################
def multi_1d_demo():
code = """
# Load a selection of data files as example
#*******************************************
from metaArray.drv_Tek import isf
ary1 = isf('""" + join(demo_dir, 'multi_1.isf') + """')()
ary2 = isf('""" + join(demo_dir, 'multi_2.isf') + """')()
ary3 = isf('""" + join(demo_dir, 'multi_3.isf') + """')()
# Correct for the DC offsets
#****************************
ary1.data -= ary1[72e-6:100e-6].data.mean()
ary2.data -= ary2[72e-6:100e-6].data.mean()
ary3.data -= ary3[72e-6:100e-6].data.mean()
# Only need to set the last label and range
#*******************************************
ary3['label'] = 'Voltage'
ary3.set_range(0, 'label', 'Time')
# Here is how to plot
#*********************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary1[72e-6:100e-6], size = (20, 15), label = 'First signal')
fig, ax = plot1d(ary2[72e-6:100e-6], size = (20, 15), label = 'Second signal', fig=fig, ax=ax)
fig, ax = plot1d(ary3[72e-6:100e-6], size = (20, 15), label = 'Third signal', fig=fig, ax=ax)
ax.legend(loc=0)
ax.set_title('Comparison of Generated signal on bent coil', fontsize=20)
fig.savefig('demo_multi_1d.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_multi_1d = demo_item(title = 'Plotting multiple 1D (A-scan) data.', exe = multi_1d_demo)
demo_multi_1d.info = 'This demo will illustrate the usage of the plot1d \
interface to put multiple 1D (A-scan) metaArrays on the same plot.'
plot_menu.add_item(demo_multi_1d)
####################################
## Example on matplotlib 1D plot interface
####################################
def plot1d_demo():
code = """
# Load some data as example
#***************************
from metaArray.drv_Tek import isf
ary = isf('""" + join(demo_dir, 'DPO2000.isf') + """')()
# Here is how to plot
#*********************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_plot1d.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_plot1d = demo_item(title = 'Plotting of 1D (A-scan) data.', exe = plot1d_demo)
demo_plot1d.info = 'This demo will illustrate the usage of the plot1d function \
for metaArray.'
plot_menu.add_item(demo_plot1d)
####################################
## Example on matplotlib 2D plot interface
####################################
def plot2d_demo():
code = """
# Load some data as example
#***************************
from cPickle import load
f = open('""" + join(demo_dir, 'rel_amplitude.pickle') + """', 'rb')
a = load(f).transpose()
f.close()
# Construct metaArray from numpy ndarray
#****************************************
from metaArray import metaArray
ary = metaArray(a)
ary['name'] = 'Relative amplitude'
ary['unit'] = '' # '' -> unitless; None -> undefined
ary['label'] = 'Amplitude ratio'
# Per axis definitions
ary.set_range(0, 'begin', -5)
ary.set_range(0, 'end', 5)
ary.set_range(0, 'unit', 'mm')
ary.set_range(0, 'label', 'x - Horizontal')
ary.set_range(1, 'begin', 5)
ary.set_range(1, 'end', -5)
ary.set_range(1, 'unit', 'mm')
ary.set_range(1, 'label', 'y - Vertical')
# Now its ready for plotting
#****************************
from metaArray.drv_pylab import plot2d
from matplotlib.pyplot import show, close
fig, ax = plot2d(ary)
fig.savefig('demo_plot2d.png', dpi=400, format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_plot2d = demo_item(title = 'Plotting of 2D (B-scan) data.', exe = plot2d_demo)
demo_plot2d.info = 'This demo will illustrate the usage of the plot2d function \
for metaArray.'
plot_menu.add_item(demo_plot2d)
####################################
## Example on matplotlib complex array plotting interface
####################################
def plot_complex_demo():
code = """
from scipy.signal.wavelets import morlet
from metaArray import metaArray
from metaArray.drv_pylab import plotcomplex, plotcomplexpolar
from matplotlib.pyplot import show, close
ary = morlet(1000, w=5.0, s=0.5, complete=True)
metAry = metaArray(ary)
metAry['name'] = '5 cycle complex morlet'
metAry['unit'] = ''
metAry.set_range(0, 'begin', -0.5)
metAry.set_range(0, 'end', 0.5)
metAry.set_range(0, 'unit', '')
metAry.set_range(0, 'label', 'Scale')
fig, host, par = plotcomplex(metAry)
show()
close(fig)
fig, host, par = plotcomplexpolar(metAry)
show()
close(fig)
"""
return prcs_demo(code)
demo_plot_complex = demo_item(title = 'Plotting of complex number metaArray.', exe = plot_complex_demo)
demo_plot_complex.info = 'This demo will illustrate the usage of the \
plotcomplex and plotcomplexpolar functions for metaArray.'
plot_menu.add_item(demo_plot_complex)
main_menu.add_item(plot_menu)
################# END visualisation demo ###########################
###############################################################################
###############################################################################
###############################################################################
# Miscellaneous demos
########################
misc_menu = demo_menu(title = 'Misc. (non-metaArray) demos')
misc_menu.info = 'This is a list of demos for miscellaneous non-metaArray aware \
classes and functions. These are useful helper classes and function used by \
various metaArray components.'
# dirPath, filePath, file_list, buffered_search
# gettypecode, prettyunit, unitPrefix
# cplx_trig_func, mother_morlet, resample, resize
####################################
## cplx_trig_func
####################################
def cplx_trig_func_demo():
code = """
# Three cycles, and 20 points
#*****************************
from metaArray.misc import cplx_trig_func
func = cplx_trig_func(nLambda = 3, pts = 20)
print(func())
# Or, specify 1kHz signal, for 2ms, at 5kHz sampling rate
#*********************************************************
func = cplx_trig_func(freq = 1e3, length = 2e-3, samp_rate = 5e3)
print(func())
"""
return prcs_demo(code)
demo_cplx_trig_func = demo_item(title = 'Complex trigonometry function generator.', exe = cplx_trig_func_demo)
demo_cplx_trig_func.info = 'This demo will illustrate the usage of the \
cplx_trig_func object. It will return a complex numpy ndarray based on the \
given combination of the following parameters (they are also attributes of the \
class instance): nLambda => Number of wavelengths. pts => Number of samples \
(length of the array). freq => Frequency of the desired trigonometric function. \
length => Duration of the function in time. samp_rate => Sampling rate. dt => \
Sampling interval. Only a minimal combination of these parameters needs \
to be specified. An InsufficientInput error will be raised if the instance is called \
before the parameters are sufficiently defined.'
misc_menu.add_item(demo_cplx_trig_func)
####################################
## Misc. unit formatting
####################################
def misc_units_demo():
code = """
# Format number in engineering units
#************************************
from metaArray.misc import engUnit
print(engUnit(1.23456e7, unit = 'eV', sigfig=3))
print(engUnit(1.23456e8, unit = 'eV', sigfig=3))
print(engUnit(1.23456e9, unit = 'eV', sigfig=4))
# Find out a suitable SI unit prefix for a given number
#*******************************************************
from metaArray.misc import unitPrefix
num, name, prefix, exponent = unitPrefix(1.23456e7)
print('The scaled number is: ' + str(num)); print('SI unit \
prefix name is: ' + name); print('SI unit prefix is: ' + prefix); print('The \
exponent to scale number with is: ' + str(exponent))
"""
return prcs_demo(code)
demo_misc_units = demo_item(title = 'Work out suitable SI unit prefixes.', exe = misc_units_demo)
demo_misc_units.info = ''
misc_menu.add_item(demo_misc_units)
####################################
## Misc obtaining a list of files
####################################
def flist_demo():
code = """
# Here is how to obtain a list of files under the given directory
#*****************************************************************
from metaArray.misc import file_list
flist = file_list('""" + demo_dir + """')
for fpath in flist: print('* ' + fpath)
# You can also specify a particular file name extension, and \
whether or not to search the subdirectories.
#**************************************************************************
flist = file_list('""" + demo_dir + """', ext = 'flxhst', SubDir = False)
for fpath in flist: print('* ' + fpath)
"""
return prcs_demo(code)
demo_flist = demo_item(title = 'Get a list of files under a given directory.', exe = flist_demo)
demo_flist.info = 'This demo illustrates the usage of the file_list function; \
it will obtain a list of files under a given directory. It has the option to \
search subdirectories (disabled by default), and to return only those with a \
matching file name extension.'
misc_menu.add_item(demo_flist)
main_menu.add_item(misc_menu)
################### END Misc. demo #################################
###############################################################################
###############################################################################
# General demos
#########################
####################################
## Basic metaArray usage
####################################
def basic_demo():
code = """
from numpy import linspace, cos, pi
from metaArray import metaArray
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
# Construct some data
#*********************
ary = cos(linspace(-2*pi, 2*pi, 28))
# Here is what the numpy ndarray looks like:
print(ary)
# Construct a metaArray from the given numpy ndarray
#****************************************************
metaAry = metaArray(ary)
# Here is what the metaArray looks like:
print(metaAry)
# metaArray is just a wrapper on top of the ndarray; you can directly access
# the underlying ndarray like this:
print(metaAry.data)
# Setting the meta information
#******************************
metaAry['name'] = 'cosine function'
metaAry['unit'] = None # None => arbitrary unit, '' => unitless
metaAry['label'] = 'Amplitude'
# Set the axis attribute
metaAry.set_range(0, 'begin', -2) # 1st axis, the first value (x0)
metaAry.set_range(0, 'end', 2) # 1st axis, the last value (x1)
metaAry.set_range(0, 'label', 'lambda') # label for the 1st axis
metaAry.set_range(0, 'unit', '') # 1st axis, unit
# This is what the metaArray looks like now:
print(metaAry)
# This is how to retrieve the meta data
#**************************************
print(metaAry['name'])
print(metaAry.get_range(0, 'begin'))
# You can plot the metaArray like this:
#***************************************
fig, ax = plot1d(metaAry, legend=-1)
fig.savefig('demo_basic.png', format='png')
show()
close(fig)
# You can slice the metaArray using array indices like an ndarray:
#*****************************************************************
fig, ax = plot1d(metaAry[5:20], legend=-1)
fig.savefig('demo_basic_index_slicing.png', format='png')
show()
close(fig)
# Or you can slice the metaArray in real space like this:
#*********************************************************
fig, ax = plot1d(metaAry[-0.25:0.75], legend=-1)
fig.savefig('demo_basic_meta_slicing.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_basic = demo_item(title = 'metaArray basic usage.', exe = basic_demo)
demo_basic.info = 'This demo will illustrate the basic usage of metaArray.'
main_menu.add_item(demo_basic)
####################################
## Example on meta functions
####################################
def general_demo():
code = """
# Load some data as example
#***************************
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
from metaArray.drv_Tek import isf
ary = isf('""" + join(demo_dir, 'DPO2000.isf') + """')[1]
fig, ax = plot1d(ary, legend=-1)
fig.savefig('demo_isf.png', format='png')
show()
close(fig)
# Do a magnitude FFT, and keep only the first 1kHz:
#***************************************************
from numpy import log10
from metaArray.metaTrans import rfft
fary = abs(rfft(ary))[:1e3]
fary.log10() # Put the values on log scale
fig, ax = plot1d(fary, legend=-1)
fig.savefig('demo_general_1.png', format='png')
show()
close(fig)
# Try again with a good amount of padding for the magnitude FFT:
#**************************************************************
from metaArray.metaFunc import padding_calc
# Say 1024 points between 0 - 1kHz
fary = rfft(ary, n = padding_calc(ary, min_freq = 0, max_freq = 1e3, resolution = 1024))
fary = abs(fary)[:1e3].log10()
fig, ax = plot1d(fary, legend=-1)
fig.savefig('demo_general_2.png', format='png')
show()
close(fig)
# Short-time FFT (STFFT):
#*************************
from metaArray.metaTrans import stfft
from metaArray.drv_pylab import plot2d
# 0-30kHz, temporal resolution 100, frequency resolution 256:
tfary = stfft(ary, tres=100, fres=256, fmax=30e3)
fig, ax = plot2d(tfary.log10())
fig.savefig('demo_general_3.png', format='png')
show()
close(fig)
# Down sample to 10kHz:
# A zero group delay low-pass FIR filter will be applied when down sampling
#********************************************************************
from metaArray.metaFunc import meta_resample
bry = meta_resample(ary, rate=10e3)
bry['name'] = 'Down sample to 10kHz'
fig, ax = plot1d(bry, legend=-1)
fig.savefig('demo_general_4.png', format='png')
show()
close(fig)
# Up sample again, and look at the difference:
#**********************************************
bry = meta_resample(bry, rate=3.125e6)
bry = ary - bry
bry['name'] = 'Noise filtered out by down sampling'
fig, ax = plot1d(bry, legend=-1)
fig.savefig('demo_general_5.png', format='png')
show()
close(fig)
# A straightforward zero group delay low-pass filter at 5kHz:
#**************************************************************
from metaArray.metaFunc import meta_lowpass
bry = meta_lowpass(ary, 5e3)
fig, ax = plot1d(bry, legend=-1)
fig.savefig('demo_general_6.png', format='png')
show()
close(fig)
# A similar zero group delay high pass filter at 5kHz:
#******************************************************
from metaArray.metaFunc import meta_highpass
bry = meta_highpass(ary, 5e3)
fig, ax = plot1d(bry, legend=-1)
fig.savefig('demo_general_7.png', format='png')
show()
close(fig)
"""
return prcs_demo(code)
demo_general = demo_item(title = 'metaArray general usage.', exe = general_demo)
demo_general.info = 'This demo will illustrate some of the more advanced \
usages of metaArray functions.'
main_menu.add_item(demo_general)
####################################
## Example on meta functions
####################################
def hist_demo():
code = """
from numpy import round
from numpy.random import rand
from metaArray import metaArray
from metaArray.metaFunc import histogram
from metaArray.drv_pylab import plot1d
from matplotlib.pyplot import show, close
metAry = metaArray(rand(100000)-0.5)
metAry['name'] = 'a random distribution'
hist = histogram(metAry, bins = 20)
fig, ax = plot1d(hist)
show()
close(fig)
for i in range(99): metAry.data += rand(100000)-0.5
# If the data itself is already quantised into regular steps, such as \
by rounding it to the nearest integer value (the steps need not be \
integer values):
#**************************************************************************
metAry.data = round(metAry.data)
metAry['name'] = 'the sum of 100 random distributions'
# Then there is no need to specify number of bins
hist = histogram(metAry)
fig, ax = plot1d(hist)
show()
close(fig)
"""
return prcs_demo(code)
demo_hist = demo_item(title = 'metaArray histogram usage.', exe = hist_demo)
demo_hist.info = 'This demo will illustrate how to bin a metaArray into a \
histogram.'
main_menu.add_item(demo_hist)
################### END General demo ###############################
###############################################################################
###############################################################################
def demo():
return main_menu()
if __name__ == "__main__":
main_menu()
| gpl-3.0 |
hejunbok/apm_planner | libs/mavlink/share/pyshared/pymavlink/examples/mavgraph.py | 29 | 5951 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, pytz, matplotlib
from math import *
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
xrange *= 24 * 60 * 60
if formatter is None:
if xrange < 1000:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
else:
formatter = matplotlib.dates.DateFormatter('%H:%M')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle='-', marker='None', tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
ax1.legend(ax1_labels,loc=opts.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=opts.legend2)
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--legend", default='upper left', help="default legend position")
parser.add_option("--legend2", default='upper right', help="default legend2 position")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 2:
print("Usage: mavlogdump.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
y[i].append(v)
x[i].append(t)
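# Illustrative note (not part of the original tool): add_data() above strips two
# optional field suffixes -- ':2' routes a field to the secondary y-axis and ':1'
# keeps values from the first log file only. A hypothetical, self-contained
# restatement of that parsing convention:
def _example_field_suffix(field):
    '''Return (bare_field, use_second_axis, first_file_only) for a field spec.'''
    use_second_axis = field.endswith(':2')
    first_file_only = field.endswith(':1')
    if use_second_axis or first_file_only:
        field = field[:-2]
    return field, use_second_axis, first_file_only
# e.g. _example_field_suffix('ATTITUDE.roll:2') -> ('ATTITUDE.roll', True, False)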
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| agpl-3.0 |
mac389/at-risk-agents | analyze-intervention.py | 1 | 9847 | import os, json,re, datetime, itertools
import numpy as np
import Graphics as artist
import matplotlib.pyplot as plt
import visualization as visualization
from awesome_print import ap
from matplotlib import rcParams
from optparse import OptionParser
from scipy.stats import percentileofscore,scoreatpercentile
from texttable import Texttable
from scipy.stats import ks_2samp
plt.switch_backend('Agg')
params = {
'axes.labelsize': 8,
'text.fontsize': 8,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': True
}
rcParams.update(params)
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 1.0")
parser.add_option("-s", "--source",
action="store",
dest="source",
default=False,
help="Folder with data to analyze")
(options, args) = parser.parse_args()
READ = 'rb'
TAB = '\t'
INTEGER = '%d'
FLOAT = '%.04f'
hist_props={"range":[-1,1],"histtype":"stepfilled"}
make_filename = lambda filename: os.path.join(os.getcwd(),basepath,filename)
basepath = os.path.join(os.getcwd(),options.source)
logfilename = os.path.join(basepath,'log-%s.txt'%(datetime.datetime.now().time().isoformat()))
USERS = 0
TIME = 1
PAST_MONTH_DRINKING = 3
#---------HELPER FUNCTIONS
def compare_demographics(data,nrows=2,ncols=3):
#Data is a list of tuples of (label,data,color)
fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=False,sharey=True)
first_label,first_idx,first_color = data[0]
second_label,second_idx,second_color = data[1]
MALE = 0.5
FEMALE = 0.3
for i,col in enumerate(axs):
for j,row in enumerate(col):
characteristic = characteristics[i*ncols+j]
uq = demographics[characteristic][first_idx]
lq = demographics[characteristic][second_idx]
_,_,patches1=row.hist(uq,color=first_color,label=artist.format(first_label), histtype='step',
weights = np.ones_like(uq)/len(uq))
plt.hold(True)
_,_,patches2=row.hist(lq,color=second_color,label=artist.format(second_label),histtype='step',
weights=np.ones_like(lq)/len(lq))
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(row)
if 'attitude' not in yvars[i*ncols+j]:
row.set_xlabel(artist.format(yvars[i*ncols+j]))
if 'gender' in yvars[i*ncols+j]:
axs[i,j].set_xticks([FEMALE,MALE])
axs[i,j].set_xticklabels(map(artist.format,['Female','Male']))
elif 'psychological' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences']))
row.set_xlabel(label)
elif 'medical' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude','to medical','consequences']))
row.set_xlabel(label)
#axs[i,j].set_xlim([-50,50])
plt.tight_layout()
fig.legend((patches1[0], patches2[0]), (artist.format(first_label),artist.format(second_label)),
loc='lower right', frameon=False, ncol=2)
#filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-no-temporal-threshold.png')
filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-%s-vs-%s.png'%(first_label,second_label))
plt.savefig(filename,dpi=300)
del fig,axs,i,j
def compare_distributions(variable_source_name,idxs,rng=(0,1)):
#Assume idxs is a list of (name, indices, color) tuples
fig = plt.figure()
ax = fig.add_subplot(111)
data = np.loadtxt(make_filename('%s.txt'%(variable_source_name)),delimiter=TAB)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.hold(True)
for subpopulation,idx,color in idxs:
weights = np.ones_like(data[idx])/len(data[idx])
ax.hist(data[idx],color=color,label=artist.format(subpopulation),histtype='step',range=rng,weights=weights)
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(ax)
ax.set_ylabel(artist.format('Prevalence'))
ax.set_xlabel(artist.format(variable_source_name))
plt.legend(frameon=False,ncol=2,loc='upper center',bbox_to_anchor=(.5,1.05))
plt.tight_layout()
plt.savefig(make_filename('%s-%s.png'%(variable_source_name,'-'.join([idx[0] for idx in idxs]))),dpi=300)
del fig,ax
#-----INITIALIZE------------------------------------------
data = {}
directory = json.load(open(os.path.join(basepath,'directory.json'),READ))
for variable in directory:
data[variable] = np.load(directory[variable]) if variable == 'complete record' else np.loadtxt(directory[variable],delimiter = TAB)
RESPONDER_FILENAME = os.path.join(basepath,'responders')
if not os.path.isfile(RESPONDER_FILENAME):
responders = [agent for agent in xrange(data['complete record'].shape[1])
if np.gradient(np.array_split(data['complete record'][:,agent,PAST_MONTH_DRINKING],3)[1]).mean()<0]
np.savetxt(RESPONDER_FILENAME,responders,delimiter=TAB,fmt=INTEGER)
ap('%d Responders: %s'%(len(responders),' '.join(map(str,responders))))
identified_responders = set(responders) & set(data['at-risk'])
ap('%d Responders identified as at-risk: %s'%(len(identified_responders),map(str,identified_responders)))
else:
responders = np.loadtxt(RESPONDER_FILENAME,delimiter=TAB)
overall_population = data['attitudes'].shape[0]
yes_response_yes_atrisk = len(set(responders) & set(data['at-risk']))
no_response_yes_atrisk = len(set(data['at-risk']) - set(responders))
no_response_no_atrisk = len(set(range(overall_population)) - set(responders)-set(data['at-risk']))
yes_response_no_atrisk = len(set(responders)-set(data['at-risk']))
#print contingency_table
table = Texttable()
table.set_cols_align(["r", "l","l","l"])
table.set_cols_valign(["t", "b","b","b"])
table.add_rows([ ["","", "At-risk", ""],
["","","+","-"],
["Responded","+", yes_response_yes_atrisk, yes_response_no_atrisk],
["","-",no_response_yes_atrisk,no_response_no_atrisk]])
print(table.draw() + "\n")
try:
print 'Sensitivity: %.02f'%(yes_response_yes_atrisk/float(yes_response_yes_atrisk+no_response_yes_atrisk))
except:
print "Sensitivity: -1"
try:
print 'Specificity: %.02f'%(no_response_no_atrisk/float(yes_response_no_atrisk+no_response_no_atrisk))
except:
print "Sensitivity -1"
#Do the heaviest consumers have different demographics than the lightest consumers?
upper_quartile_cutoff = scoreatpercentile(data['past month drinking'],75)
lower_quartile_cutoff = scoreatpercentile(data['past month drinking'],25)
#ap('Upper drinking cutoff: %.02f, lower cutoff %.02f'%(upper_quartile_cutoff,lower_quartile_cutoff))
#When loading complete record from file, first axis is time, second axis is agent, third axis is variable
light_users_idx = np.where(data['complete record'][:,:,PAST_MONTH_DRINKING].mean(axis=0)<lower_quartile_cutoff)[USERS]
heavy_users_idx = np.where(data['complete record'][:,:,PAST_MONTH_DRINKING].mean(axis=0)>upper_quartile_cutoff)[USERS]
variable_filenames = [filename for filename in os.listdir(basepath) if 'initial-distribution' in filename]
demographics = {filename:np.loadtxt(make_filename(filename),delimiter=TAB) for filename in variable_filenames}
yvars = open('./agent-variables',READ).read().splitlines()
characteristics = ['initial-distribution-%s.txt'%('-'.join(yvar.split())) for yvar in yvars]
#-------MAIN
#Baseline demographics, Compare initial and final drinking distributions, Time series of consumption behavior
visualization.graph_everything(basepath=basepath,verbose=False,logfilename=logfilename)
#What is the evolution of the distribution of consumptive behavior?
visualization.snapshots(data['attitudes'],indices=[0,data['attitudes'].shape[1]/2-1,
data['attitudes'].shape[1]-1],basepath=basepath,data_label='drinking behavior')
#Are the demographics of responders different?
if len(responders)>0:
visualization.population_summary(basepath=basepath,criterion=list(responders),criterionname='responders')
#Are the demographics of the at-risk population different?
visualization.population_summary(basepath=basepath,criterion=map(int,data['at-risk']), criterionname='at risk')
#Are the dynamics of the pattern of consumption of the at-risk population different?
visualization.time_series(basepath=basepath,criterion=map(int,data['at-risk']),criterionname='at risk')
'''
POSITIVE CONTROL: We hypothesize that those at-risk for drug consumption have different attitudes to the
medical consequences of consumption. Is this true?
Compare general population | at-risk subpopulation | at-risk and responsive subpopulation
'''
visualization.plot_variable(data['attitudes'],basepath=basepath,
criterion=[list(responders),map(int,data['at-risk']),list(set(map(int,data['at-risk']))-set(responders))],
dataname='Intent to Use',criterionname=['Responders','At risk','Non-responders'])
'''
visualization.plot_variable(data['complete record'][:,:,PAST_MONTH_DRINKING].T,basepath=basepath,
criterion=[list(responders),map(int,data['at-risk']),list(set(map(int,data['at-risk']))-set(responders))],dataname='Drug Use',
criterionname=['Responders','At risk','Non-responders'])
#visualization.plot_variable(data['complete record'][:,:,PAST_MONTH_DRINKING])
'''
visualization.plot_variable(data['past month drinking'],basepath=basepath,
criterion=[list(responders),map(int,data['at-risk']),list(set(map(int,data['at-risk']))-set(responders))],dataname='Drug Use',
criterionname=['Responders','At risk','Non-responders'])
#visualization.plot_variable(data['complete record'][:,:,PAST_MONTH_DRINKING])
#Identify baseline characteristics of each quartile
#compare_demographics([('Heavy users',heavy_users_idx,'r'),('Light users',light_users_idx,'k')])
#compare_distributions('alpha',[('Heavy users',heavy_users_idx,'r'),('Light users',light_users_idx,'k')])
compare_distributions('alpha',[('Responders',list(responders),'r'),
('Non-responders',list(set(range(data['complete record'].shape[1]))-set(responders)),'k')])
| mit |
JsNoNo/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
gidonro/Connectome-embeddings | connectome_embed_nature.py | 1 | 11397 | '''
Reference implementation of node2vec.
Original node2vec functions and implementation
Author: Aditya Grover
For more details, refer to the paper:
node2vec: Scalable Feature Learning for Networks
Aditya Grover and Jure Leskovec
Knowledge Discovery and Data Mining (KDD), 2016
Modifications for:
Connectome embeddings: A deep learning framework for
mapping higher-order relations between brain structure and function
Author: Gideon Rosenthal
'''
import numpy as np
import networkx as nx
import node2vec
from gensim.models import Word2Vec
from sklearn.preprocessing import Normalizer
import pickle
def create_embedding(dir_name, input_edge_list, output_embedding, current_dti, current_name,
permutation_no=500, lesion_node = 0, dimensions=30, walk_length=20,
num_walks=800, window_size=3, iter=1, workers=10, p=0.1, q=1.6, sg=0,
weighted=True, directed=False):
'''
Args:
Connectome embedding related attributes
dir_name: directory name
input_edge_list: name of input edge list
output_embedding: name of output embedding
current_dti: matrix of current dti to embbed
current_name: name of the analysis
permutation_no: how many permutations are needed
lesion_node: if a lesion node is needed
node2vec related attributes
dimensions: dimensions of embeddings
walk_length: Length of walk per source
num_walks:Number of walks per source
window_size : Context size for optimization
iter : Number of epochs in SGD
workers : Number of parallel workers
p: Return hyperparameter
q: Inout hyperparameter
sg: skipgram = 1, cbow=0
weighted:Boolean specifying (un)weighted
directed:Graph is (un)directed
Returns:
word2Vecmodelsorted: word2vec embeddings
'''
zero = 1.11324633283e-16
#creating edge list in the format which is digestible by node2vec
if lesion_node > 0:
with open(input_edge_list, 'w') as edge_list:
for r in range(current_dti.shape[0]):
for c in range(current_dti.shape[0]):
if current_dti[r, c] != 0 and r != lesion_node and c != lesion_node:
edge_list.write('%s %s %s \n' % (r, c, current_dti[r, c]))
if r == lesion_node or c == lesion_node:
edge_list.write('%s %s %s \n' % (r, c, zero))
else:
with open(input_edge_list, 'w') as edge_list:
for r in range(current_dti.shape[0]):
for c in range(current_dti.shape[0]):
if current_dti[r, c] != 0:
edge_list.write('%s %s %s \n' % (r, c, current_dti[r, c]))
# we multiply the num_walks by permutation_no to save time in calling the functions.
walks_agg = node2vec_agg_walks(input_edge_list, walk_length=walk_length, num_walks=num_walks * permutation_no,
workers=workers, p=p, q=q, weighted=weighted, directed=directed)
with open(dir_name + current_name + '_walks_lesion_' + str(lesion_node), 'w') as f:
pickle.dump(walks_agg, f)
word2Vecmodelsorted = node2veclearn_agg(walks_agg, output_embedding, num_walks=num_walks,
permutation_no=permutation_no, number_of_nodes=current_dti.shape[0],
dimensions=dimensions, window_size=window_size, iter=iter, workers=workers)
return word2Vecmodelsorted
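# Illustrative usage sketch (not part of the original code). The directory, file
# names and the random connectivity matrix below are hypothetical placeholders,
# and the run directory is assumed to already exist.
def _example_create_embedding():
    '''Minimal, cheap call of create_embedding() on a toy symmetric matrix.'''
    import os
    dir_name = os.path.join(os.getcwd(), 'example_run') + os.sep
    toy_dti = np.random.rand(83, 83)
    toy_dti = (toy_dti + toy_dti.T) / 2.0   # symmetric, like an undirected connectome
    return create_embedding(dir_name,
                            input_edge_list=dir_name + 'example_edge_list.txt',
                            output_embedding=dir_name + 'example',
                            current_dti=toy_dti,
                            current_name='example',
                            permutation_no=2,   # default is 500; keep the sketch cheap
                            num_walks=10, walk_length=10, workers=2)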
def read_graph(input_edge_list, weighted, directed):
'''
Reads the input network in networkx.
'''
if weighted:
G = nx.read_edgelist(input_edge_list, nodetype=int, data=(('weight', float),), create_using=nx.DiGraph())
else:
G = nx.read_edgelist(input_edge_list, nodetype=int, create_using=nx.DiGraph())
for edge in G.edges():
G[edge[0]][edge[1]]['weight'] = 1
if not directed:
G = G.to_undirected()
return G
def learn_embeddings(walks, dimensions, window_size, workers, iter, output_embedding, sg=0):
'''
Learn embeddings
'''
walks = [map(str, walk) for walk in walks]
model = Word2Vec(walks, size=dimensions, window=window_size, min_count=0, sg=sg,
workers=workers, iter=iter)
model.save(output_embedding + '.embeddings')
# model.save_word2vec_format(output_embedding + 'word2vecformat.embeddings')
return model
def normalize_embeddings(word2Vecmodel):
normalizer = Normalizer(copy=False)
word2Vecmodelsorted = np.zeros([word2Vecmodel.syn0.shape[0], word2Vecmodel.syn0.shape[1]])
for i in range(word2Vecmodel.syn0.shape[0]):
word2Vecmodelsorted[i] = normalizer.fit_transform(word2Vecmodel[str(i)])
return word2Vecmodelsorted
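# Note (illustrative, not part of the original code): normalize_embeddings() above
# rescales each node's embedding vector to unit L2 norm, so dot products between
# node vectors equal their cosine similarity. A minimal equivalent without sklearn:
def _l2_normalize_rows(vectors):
    '''Hypothetical helper: row-wise L2 normalisation of a (nodes x dimensions) array.'''
    norms = np.sqrt((vectors ** 2).sum(axis=1))
    norms[norms == 0] = 1.0   # guard against all-zero rows
    return vectors / norms[:, np.newaxis]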
def node2veclearn(input_edge_list, output_embedding, dimensions=128, walk_length=10, num_walks=10, window_size=10,
iter=1, workers=8, p=1, q=1, weighted=True, directed=True, sg=0):
"""Pipeline for representational learning for all nodes in a graph.
Keyword arguments:
input_edge_list -- Input graph path
output_embedding -- Embeddings path
dimensions -- Number of dimensions (default=128)
walk-length -- Length of walk per source (default=10)
num-walks -- Number of walks per source (default=10)
window-size -- Context size for optimization (default=10)
iter -- Number of epochs in SGD (default=1)
workers -- Number of parallel workers (default=8)
p -- Return hyperparameter (default=1)
q -- Inout hyperparameter (default=1)
weighted -- Boolean specifying (un)weighted (default=True)
directed -- Graph is (un)directed(default=True)
example -
working_dir = '/home/lab_users/Downloads/NKI_Rockland/hagmann/'
input_edge_list = working_dir + 'hagmann_dti_no_ENT_only_positive.txt'
output_embedding = working_dir + 'hagmann_dti.embeddings'
node2veclearn(input_edge_list, output_embedding, dimensions = 30, walk_length = 50, num_walks=400, window_size=3, iter=1, workers=40, p=0.2, q=2.0, weighted=True, directed=True)
"""
nx_G = read_graph(input_edge_list, weighted, directed)
G = node2vec.Graph(nx_G, directed, p, q)
G.preprocess_transition_probs()
walks = G.simulate_walks(num_walks, walk_length)
model = learn_embeddings(walks, dimensions, window_size, workers, iter, output_embedding, sg)
return model
def node2vec_agg_walks(input_edge_list, walk_length=10, num_walks=10, workers=8, p=1, q=1, weighted=True,
directed=True):
"""Pipeline for representational learning for all nodes in a graph.
Keyword arguments:
input_edge_list -- Input graph path
walk-length -- Length of walk per source (default=10)
num-walks -- Number of walks per source (default=10)
workers -- Number of parallel workers (default=8)
p -- Return hyperparameter (default=1)
q -- Inout hyperparameter (default=1)
weighted -- Boolean specifying (un)weighted (default=True)
directed -- Graph is (un)directed(default=True)
example -
working_dir = '/home/lab_users/Downloads/NKI_Rockland/hagmann/'
input_edge_list = working_dir + 'hagmann_dti_no_ENT_only_positive.txt'
output_embedding = working_dir + 'hagmann_dti.embeddings'
node2veclearn(input_edge_list, output_embedding, dimensions = 30, walk_length = 50, num_walks=400, window_size=3, iter=1, workers=40, p=0.2, q=2.0, weighted=True, directed=True)
"""
nx_G = read_graph(input_edge_list, weighted, directed)
G = node2vec.Graph(nx_G, directed, p, q)
G.preprocess_transition_probs()
walks = G.simulate_walks_parallel(num_walks, walk_length, workers)
return walks
def node2veclearn_agg(walks, output_embedding, num_walks=10, permutation_no=10, number_of_nodes=83, dimensions=128,
window_size=10, iter=1, workers=8, sg=0):
"""Pipeline for representational learning for all nodes in a graph.
Keyword arguments:
walks -- list of walks generated by node2vec_agg_walks
output_embedding -- Embeddings path
dimensions -- Number of dimensions (default=128)
num-walks -- Number of walks per source (default=10)
permutation_no -- number of permutations (default=10)
window-size -- Context size for optimization (default=10)
iter -- Number of epochs in SGD (default=1)
workers -- Number of parallel workers (default=8)
sg -- skipgram = 1, cbow = 0 (default=0)
number_of_nodes -- number of nodes in the graph (default=83)
example -
working_dir = '/home/lab_users/Downloads/NKI_Rockland/hagmann/'
input_edge_list = working_dir + 'hagmann_dti_no_ENT_only_positive.txt'
output_embedding = working_dir + 'hagmann_dti.embeddings'
node2veclearn(input_edge_list, output_embedding, dimensions = 30, walk_length = 50, num_walks=400, window_size=3, iter=1, workers=40, p=0.2, q=2.0, weighted=True, directed=True)
"""
word2vec_permutations = np.zeros([permutation_no, number_of_nodes, dimensions])
count = 0
for permute in range(0, permutation_no * num_walks * number_of_nodes, num_walks * number_of_nodes):
model = learn_embeddings(walks[permute:permute + num_walks * number_of_nodes], dimensions, window_size, workers,
iter, output_embedding, sg)
word2Vecmodelsorted = normalize_embeddings(model)
word2vec_permutations[count, ...] = word2Vecmodelsorted
count += 1
return word2vec_permutations
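# Illustrative note (not part of the original code): node2vec_agg_walks() is called
# with num_walks * permutation_no walks per node, and node2veclearn_agg() slices the
# resulting flat walk list into consecutive chunks of (num_walks * number_of_nodes)
# walks -- one chunk per permutation, one Word2Vec fit per chunk. A toy check of
# that chunking arithmetic:
def _example_walk_chunking(number_of_nodes=4, num_walks=3, permutation_no=2):
    '''Hypothetical sketch: return the (start, stop) walk indices fed to each permutation.'''
    chunk = num_walks * number_of_nodes
    return [(start, start + chunk)
            for start in range(0, chunk * permutation_no, chunk)]   # e.g. [(0, 12), (12, 24)]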
def node2veclearn_update(input_edge_list, org_embedding, new_embedding, dimensions=128, walk_length=10, num_walks=10,
window_size=10, iter=1, workers=8, p=1, q=1, weighted=True, directed=True):
"""Pipeline for updating an embedding
Keyword arguments:
org_embedding -- original embedding
new_embedding -- new Embeddings path
dimensions -- Number of dimensions (default=128)
walk-length -- Length of walk per source (default=10)
num-walks -- Number of walks per source (default=10)
window-size -- Context size for optimization (default=10)
iter -- Number of epochs in SGD (default=1)
workers -- Number of parallel workers (default=8)
p -- Return hyperparameter (default=1)
q -- Inout hyperparameter (default=1)
weighted -- Boolean specifying (un)weighted (default=True)
directed -- Graph is (un)directed(default=True)
example -
working_dir = '/home/lab_users/Downloads/NKI_Rockland/hagmann/'
input_edge_list = working_dir + 'hagmann_dti_no_ENT_only_positive.txt'
org_embedding = working_dir + 'hagmann_dti.embeddings'
new_embedding = working_dir + 'hagmann_dti_updated'
node2veclearn_update(input_edge_list, org_embedding, new_embedding, walk_length = 50, num_walks=400, p=0.2, q=2.0, weighted=True, directed=True)
"""
nx_G = read_graph(input_edge_list, weighted, directed)
G = node2vec.Graph(nx_G, directed, p, q)
G.preprocess_transition_probs()
walks = G.simulate_walks(num_walks, walk_length)
model = Word2Vec.load(org_embedding)
model.train(walks)
model.save(new_embedding + '.embeddings')
# model.save_word2vec_format(new_embedding + 'word2vecformat.embeddings')
return model
 | mit |
jlegendary/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
Rob-Rau/EbbCFD | scripts/python/PlotMeshes.py | 1 | 1035 | #!/usr/bin/env python3
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import EbbUtils as eu
#-----------------------------------------------------------
def plotmesh(Mesh, color):
V = Mesh['V']
E = Mesh['E']
BE = Mesh['BE']
F = np.zeros((np.size(E)//3, 1))  # one value per triangle; integer division keeps the shape integral under Python 3
F = F[:,0]
F[:] = 1
plt.tripcolor(V[:,0], V[:,1], triangles=E, facecolors=F, edgecolors=color, cmap=plt.cm.gray, vmin=0, vmax=1, alpha=1, linewidth=0.5)
for i in range(len(BE)):
x = [V[BE[i,0],0], V[BE[i,1],0]]
y = [V[BE[i,0],1], V[BE[i,1],1]]
plt.plot(x, y, '-', linewidth=2.5, color=color)
plt.axis('equal')
plt.axis([-100, 100,-100, 100])
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Not enough input arguments')
sys.exit()
colors = ['k', 'b', 'g', 'c', 'y', 'm', 'r']
f = plt.figure(figsize=(12,6))
for idx in range(len(sys.argv) - 1):
print("Plotting "+sys.argv[idx+1])
mesh = eu.importEbbMatlabMesh(sys.argv[idx+1])
plotmesh(mesh, colors[idx%len(colors)])
plt.hold(True)
plt.show(block=True)
 | mit |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
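# Illustrative usage sketch (not part of the original module): switching to a
# non-interactive image backend before batch rendering, as the docstring above
# suggests. Only standard pyplot calls are used; the file name is hypothetical.
def _example_batch_render(fname='example.png'):
    'Hypothetical helper: render a trivial figure with the Agg backend and no GUI.'
    switch_backend('Agg')            # image backend; no GUI windows will pop up
    figure()
    gca().plot([0, 1, 2], [0, 1, 4])
    savefig(fname)
    close()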
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so get the current hold state here;
    # should pyplot's hold toggle the rc setting?  Probably not.
b = ax.ishold()
rc('axes', hold=b)
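# Illustrative sketch of the hold state described above (kept as comments so
# nothing runs at import time; the data values are arbitrary):
#
#   plot([1, 2, 3])      # first line
#   hold(True)           # subsequent commands add to the same axes
#   plot([3, 2, 1])      # drawn on top of the first line
#   hold(False)          # the next plot command will clear the axes first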
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
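# Minimal sketch for over() (comments only; arbitrary example data): it adds
# to the current axes regardless of the current hold state, then restores it.
#
#   plot([1, 2, 3])
#   over(plot, [3, 2, 1], 'r--')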
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
    ======= ============ ================================================
    kwarg   Accepts      Description
    ======= ============ ================================================
    axisbg  color        the axes background color
    frameon [True|False] display the frame?
    sharex  otherax      current axes shares xaxis attribute with otherax
    sharey  otherax      current axes shares yaxis attribute with otherax
    polar   [True|False] use a polar axes?
    ======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
    nargs = len(args)
    if nargs == 0:
        return subplot(111, **kwargs)
    if nargs > 1:
        raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
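# Sketch of the axes() call forms documented above (comments only; the rect
# values are arbitrary examples, not recommended defaults):
#
#   ax1 = axes()                                   # same as subplot(111)
#   ax2 = axes([0.2, 0.2, 0.6, 0.6], axisbg='w')   # rect = [left, bottom, width, height]
#   axes(ax1)                                      # make ax1 the current axes again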
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
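# Example sketch for twinx() (comments only; placeholder data): two y scales
# that share a single x axis.
#
#   plot([0, 1, 2], [10, 20, 30], 'b-')
#   ax2 = twinx()                        # y ticks on the right, same x axis
#   ax2.plot([0, 1, 2], [0.1, 0.2, 0.3], 'r-')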
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
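# Example call using the parameter names listed above (comments only; the
# values are just the suggested defaults with a larger bottom margin, e.g.
# to make room for rotated tick labels):
#
#   subplots_adjust(left=0.125, right=0.9, bottom=0.2, top=0.9,
#                   wspace=0.2, hspace=0.2)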
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
    *ymin*). Note this is slightly different from MATLAB.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Defaults override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
    'rotation'            : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
      yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
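# Sketch of the scale helpers above (comments only): 'log' is one of the
# registered names reported by get_scale_names().
#
#   plot([1, 10, 100, 1000])
#   yscale('log')            # equivalent to plotting with semilogy()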
def xticks(*args, **kwargs):
"""
Set/Get the xlimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Set/Get the ylimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
    When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each angle.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands():
    return ('axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
            'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
            'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
            'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
            'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
            'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
            'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
            'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
    Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
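# Minimal colorbar sketch (comments only; assumes the pylab namespace where
# rand() is available): imshow sets the current image, which colorbar() uses.
#
#   imshow(rand(10, 10))
#   colorbar()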
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
    if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
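# Example sketch for matshow() (comments only; assumes the pylab namespace
# where rand() is available):
#
#   A = rand(5, 8)
#   im = matshow(A)      # new figure, origin at the upper left, ticks on top
#   colorbar(im)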
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
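# Polar plot sketch (comments only; assumes the pylab namespace for arange,
# pi and cos; the curve is an arbitrary example):
#
#   theta = arange(0, 2 * pi, 0.01)
#   polar(theta, cos(4 * theta))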
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
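# The autogenerated Axes wrappers in this section, such as acorr() above,
# all accept an extra 'hold' keyword, e.g. (comments only; arbitrary data):
#
#   plot([1, 2, 3])
#   plot([3, 2, 1], hold=False)   # clears the axes before drawing, then the
#                                 # previous hold state is restored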
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| agpl-3.0 |
themrmax/scikit-learn | sklearn/metrics/tests/test_ranking.py | 2 | 42060 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import ndcg_score
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
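# Quick sanity sketch for _auc (comments only; toy arrays chosen arbitrarily):
# with one of the four positive/negative pairs mis-ordered, both the reference
# implementation above and roc_auc_score give 0.75.
#
#   y_true = np.array([0, 0, 1, 1])
#   y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   _auc(y_true, y_score)            # 3 correctly ordered pairs out of 4 -> 0.75
#   roc_auc_score(y_true, y_score)   # 0.75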
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
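# Illustrative sketch (added for exposition, not part of the original test
# module): for the same toy scores as above, the relevant documents sit at
# ranks 1 and 3 of the descending-score ordering, so the average precision is
# (1/1 + 2/3) / 2 = 5/6; the `_ap_demo_*` names are hypothetical.
_ap_demo_y_true = np.array([0, 0, 1, 1])
_ap_demo_y_score = np.array([0.1, 0.4, 0.35, 0.8])
assert abs(_average_precision(_ap_demo_y_true, _ap_demo_y_score) - 5. / 6) < 1e-12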
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def test_ndcg_score():
# Check perfect ranking
y_true = [1, 0, 2]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
perfect = ndcg_score(y_true, y_score)
assert_equal(perfect, 1.0)
# Check bad ranking with a small K
y_true = [0, 2, 1]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
short_k = ndcg_score(y_true, y_score, k=1)
assert_equal(short_k, 0.0)
# Check a random scoring
y_true = [2, 1, 0]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
average_ranking = ndcg_score(y_true, y_score, k=2)
assert_almost_equal(average_ranking, 0.63092975)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various cases.
    # Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. a two-way tie for rank 1 means that both labels get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Let's count the number of relevant labels with a better rank
            # (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
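# Illustrative sketch (added for exposition, not part of the original test
# module): with scores [0.25, 0.5, 0.75] and relevant labels 0 and 2, label 2
# is ranked first (precision 1/1) while label 0 is ranked last with both
# relevant labels at or above its rank (precision 2/3), so the label ranking
# average precision is (1/1 + 2/3) / 2 = 5/6.
assert abs(_my_lrap([[1, 0, 1]], [[0.25, 0.5, 0.75]]) - 5. / 6) < 1e-12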
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
weissercn/learningml | learningml/GoF/analysis/event_shapes_lower_level/plot_event_shapes_lower_level_alphaSvalue_analysis.py | 1 | 10670 | from __future__ import print_function
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
import time
# Options for mode 'lower_level'
MODE = 'lower_level'
label_size = 28
###############################################################################
mpl.rc('font', family='serif', size=34, serif="Times New Roman")
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['text.latex.preamble'] = [r'\boldmath']
mpl.rcParams['legend.fontsize'] = "medium"
mpl.rc('savefig', format ="pdf")
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
mpl.rcParams['figure.figsize'] = 8, 6
mpl.rcParams['lines.linewidth'] = 3
def binomial_error(l1):
err_list = []
for item in l1:
if item==1. or item==0.: err_list.append(np.sqrt(100./101.*(1.-100./101.)/101.))
else: err_list.append(np.sqrt(item*(1.-item)/100.))
return err_list
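# Illustrative sketch (added for exposition, not part of the original script):
# for an observed rejection fraction of 0.5 out of 100 pseudo-experiments the
# binomial standard error is sqrt(0.5 * 0.5 / 100) = 0.05, while fractions of
# exactly 0 or 1 fall back to the regularised 100/101 estimate above.
assert abs(binomial_error([0.5])[0] - 0.05) < 1e-12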
###############################################################################
# E V E N T   S H A P E S  -  L O W E R   L E V E L
###############################################################################
if MODE == 'lower_level':
#dimensions = [2,3,4,5,6,7,8,9,10]
#dimensions = [1,2,3,4,5]
#param_list = [0.130,0.132,0.133,0.134,0.135,0.1365,0.14]
param_list = [0.130, 0.132,0.133,0.134,0.1345,0.135,0.1355,0.136,0.137,0.1375,0.138,0.139,0.14]
ml_classifiers = ['nn','bdt','xgb','svm']
ml_classifiers_colors = ['green','magenta','cyan']
ml_classifiers_bin = 5
chi2_color = 'red'
chi2_splits = [1,2,3,4,5,6,7,8,9,10]
#chi2_splits = [8]
ml_folder_name = "automatisation_monash_alphaSvalue_lower_level/evaluation_monash_lower_level_2files_attempt4"
chi2_folder_name = "event_shapes_lower_level"
#chi2_folder_name = "event_shapes_lower_level_without_Mult"
ml_file_name = "{1}_monash_{0}_alphaSvalue_lower_level_syst_0_01__p_values"
chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4__{0}D_chi2_{1}_splits_p_values"
#chi2_file_name = "event_shapes_lower_level_syst_0_01_attempt4_without_Mult__{0}D_chi2_{1}_splits_p_values"
chi2_thrust_folder_name = "event_shapes_thrust"
chi2_thrust_file_name = "event_shapes_thrust_syst_0_01_attempt4__{0}D_chi2_{1}_splits_p_values"
title = "Event shapes lower level"
name = "event_shapes_lower_level"
CL = 0.95
ml_classifiers_dict={}
chi2_splits_dict={}
chi2_thrust_splits_dict={}
#xwidth = [0.5]*len(param_list)
xwidth = np.subtract(param_list[1:],param_list[:-1])/2.
xwidth_left = np.append(xwidth[0] , xwidth)
xwidth_right = np.append(xwidth,xwidth[-1])
print("xwidth : ", xwidth)
fig = plt.figure()
ax = fig.add_axes([0.2,0.15,0.75,0.8])
if True:
for ml_classifier_index, ml_classifier in enumerate(ml_classifiers):
ml_classifiers_dict[ml_classifier]= []
for param in param_list:
p_values = np.loadtxt(os.environ['learningml']+"/GoF/optimisation_and_evaluation/"+ml_folder_name+"/"+ml_classifier+"/"+ml_file_name.format(param,ml_classifier,ml_classifiers_bin)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
ml_classifiers_dict[ml_classifier].append(p_values_in_CL)
ml_classifiers_dict[ml_classifier]= np.divide(ml_classifiers_dict[ml_classifier],100.)
ax.errorbar(param_list,ml_classifiers_dict['nn'], yerr=binomial_error(ml_classifiers_dict['nn']), linestyle='-', marker='s', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[0], label=r'$ANN$',clip_on=False)
print("bdt : ", ml_classifiers_dict['bdt'])
print("xgb : ", ml_classifiers_dict['xgb'])
ml_classifiers_dict['BDT_best']= [max(item1,item2) for item1, item2 in zip(ml_classifiers_dict['bdt'],ml_classifiers_dict['xgb'])]
print("BDT : ", ml_classifiers_dict['BDT_best'])
ax.errorbar(param_list,ml_classifiers_dict['BDT_best'], yerr=binomial_error(ml_classifiers_dict['BDT_best']), linestyle='-', marker='o', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[1], label=r'$BDT$', clip_on=False)
ax.errorbar(param_list,ml_classifiers_dict['svm'], yerr=binomial_error(ml_classifiers_dict['svm']), linestyle='-', marker='^', markeredgewidth=0.0, markersize=12, color=ml_classifiers_colors[2], label=r'$SVM$', clip_on=False)
for chi2_split_index, chi2_split in enumerate(chi2_splits):
chi2_splits_dict[str(chi2_split)]=[]
chi2_best = []
for param in param_list:
chi2_best_dim = []
for chi2_split_index, chi2_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_folder_name+"/"+chi2_file_name.format(param,chi2_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_splits_dict[str(chi2_split)].append(temp)
chi2_best_dim.append(temp)
temp_best = np.max(chi2_best_dim)
#print(str(dim)+"D chi2_best_dim : ", chi2_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_best.append(temp_best)
#print("chi2_best : ",chi2_best)
for chi2_thrust_split_index, chi2_thrust_split in enumerate(chi2_splits):
chi2_thrust_splits_dict[str(chi2_thrust_split)]=[]
chi2_thrust_best = []
for param in param_list:
chi2_thrust_best_dim = []
for chi2_thrust_split_index, chi2_thrust_split in enumerate(chi2_splits):
p_values = np.loadtxt(os.environ['learningml']+"/GoF/chi2/"+chi2_thrust_folder_name+"/"+chi2_thrust_file_name.format(param,chi2_thrust_split)).tolist()
p_values_in_CL = sum(i < (1-CL) for i in p_values)
temp = float(p_values_in_CL) /100.
chi2_thrust_splits_dict[str(chi2_thrust_split)].append(temp)
chi2_thrust_best_dim.append(temp)
temp_best = np.max(chi2_thrust_best_dim)
#print(str(dim)+"D chi2_thrust_best_dim : ", chi2_thrust_best_dim)
#print(str(dim)+"D temp_best : ",np.max(temp_best))
chi2_thrust_best.append(temp_best)
#print("chi2_thrust_best : ",chi2_thrust_best)
print("param_list : ",param_list)
print("chi2_best : ", chi2_best)
print("chi2_splits_dict : ", chi2_splits_dict)
ax.errorbar(param_list,chi2_best, yerr=binomial_error(chi2_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='black', label=r'$\chi^2$', clip_on=False)
ax.errorbar(param_list,chi2_thrust_best, yerr=binomial_error(chi2_thrust_best), linestyle='--', marker='$\chi$', markeredgecolor='none', markersize=18, color='grey', label=r'$\chi^2 Thrust$', clip_on=False)
print("ml_classifiers_dict : ",ml_classifiers_dict)
print("chi2_best : ", chi2_best)
ax.plot((0.1365,0.1365),(0.,1.),c="grey",linestyle="--")
ax.set_xlim([0.129,0.1405])
ax.set_ylim([0.,1.])
ax.set_xlabel(r"$\alpha_{S}$")
ax.set_ylabel("Fraction rejected")
a, b, c = [0.130,0.133], [0.1365],[0.14]
ax.set_xticks(a+b+c)
xx, locs = plt.xticks()
ll = ['%.3f' % y for y in a] + ['%.4f' % y for y in b] + ['%.3f' % y for y in c]
plt.xticks(xx, ll)
#ax.legend(loc='lower left', frameon=False, numpoints=1)
fig_leg = plt.figure(figsize=(8,2.7))
ax_leg = fig_leg.add_axes([0.0,0.0,1.0,1.0])
plt.tick_params(axis='x',which='both',bottom='off', top='off', labelbottom='off')
plt.tick_params(axis='y',which='both',bottom='off', top='off', labelbottom='off')
ax_leg.yaxis.set_ticks_position('none')
ax_leg.set_frame_on(False)
plt.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',frameon=False, numpoints=1,ncol=2)
fig_leg.savefig("event_shapes_lower_level_analysis_legend.pdf")
#fig_name=name+"_alphaSvalue_analysis"
fig_name="event_shapes_lower_level_analysis"
fig.savefig(fig_name+".pdf")
fig.savefig(fig_name+"_"+time.strftime("%b_%d_%Y")+".pdf")
print("Saved the figure as" , fig_name+".pdf")
| mit |
azjps/bokeh | bokeh/charts/__init__.py | 6 | 1468 | from __future__ import absolute_import
from ..util.dependencies import import_required
import_required(
'pandas',
'The bokeh.charts interface requires Pandas (http://pandas.pydata.org) to be installed.'
)
# defaults and constants
from ..plotting.helpers import DEFAULT_PALETTE
# main components
from .chart import Chart, defaults
# operations and attributes for users to input into Charts
from .attributes import color, marker, cat
from .operations import stack, blend
from .stats import bins
# builders
from .builders.line_builder import Line
from .builders.histogram_builder import Histogram
from .builders.bar_builder import Bar
from .builders.scatter_builder import Scatter
from .builders.boxplot_builder import BoxPlot
from .builders.step_builder import Step
from .builders.timeseries_builder import TimeSeries
from .builders.dot_builder import Dot
from .builders.area_builder import Area
from .builders.horizon_builder import Horizon
from .builders.heatmap_builder import HeatMap
from .builders.donut_builder import Donut
from .builders.chord_builder import Chord
# easy access to required bokeh components
from ..models import ColumnDataSource
from ..io import (
curdoc, output_file, output_notebook, output_server, push,
reset_output, save, show, gridplot, vplot, hplot)
# Silence pyflakes
(curdoc, output_file, output_notebook, output_server, push,
reset_output, save, show, gridplot, vplot, hplot, ColumnDataSource,
DEFAULT_PALETTE)
| bsd-3-clause |
gizela/gizela | gizela/pyplot/FigureLayoutBase.py | 1 | 19848 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <[email protected]>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id: FigureLayoutBase.py 117 2011-01-05 23:28:15Z tomaskubin $
import matplotlib
import gizela
from gizela.util.Error import Error
from gizela.pyplot.PlotPoint import PlotPoint
from gizela.pyplot.BackendSingleton import BackendSingleton
#import math
class FigureLayoutBaseError(Error): pass
class FigureLayoutBase(object):
'''
object with matplotlib Figure instance
base class for other Layouts
just figure with one axes rectangle
work with: axes orientation
axes scale
save figure as image
plot points xy and x, y, or z coordinate separately
plot standard deviation along vertical axis
plot error ellipses
'''
# matplotlib backend
#backend = None # backend - just the first instance can set backend
#@classmethod
#def set_backend(cls, backend):
# if cls.backend == None:
# try:
# matplotlib.use(backend)
# cls.backend = backend
# except:
# raise FigureLayoutBaseError, "Backend set error"
# else:
# if backend != cls.backend:
# raise FigureLayoutBaseError, "Different backend can not set"
def __init__(self,
axesOri="en",
figScale=None,
configFileName=None):
"""
axesOri: orientation of axes ne, en, sw, ws, ...
figScale: scale of data in axes
configFileName ... name of configuration file
"""
# sets self.config dictionary
self.parse_config_file(name=configFileName)
# first of all set backend
# call the BackendSingleton class
if "backend" in self.config and "name" in self.config["backend"]:
self.backend = BackendSingleton(self.config["backend"]["name"])
else:
self.backend = BackendSingleton("GTK") # implicit backend
# set figure
import matplotlib.pyplot
self.figure = matplotlib.pyplot.figure() #figure instance
# set size of figure
if "figure" in self.config and "size" in self.config["figure"]:
self.figSize = [float(s)
for s in self.config["figure"]["size"].split(",")]
else:
# default value A4
self.figSize = [297, 210]
# figure size in milimeters
self.figWidth = self.figSize[0]
self.figHeight = self.figSize[1]
sizei = [i / 25.4 for i in self.figSize]
self.figure.set_size_inches(sizei, forward=True)
# forward figure size to window size
# works just for GTK* and WX* backends
# compute sizes
if "figure" in self.config and "border" in self.config["figure"]:
self.border = self.config["figure"]["border"]
else: self.border = 5 # implicit value
if "axes" in self.config and "tickLabelSpace" in self.config["axes"]:
tickSpace = [float(i)
for i in self.config["axes"]["tickLabelSpace"].split(",")]
else: tickSpace = [7.0, 10.0] # implicit value
self.tickSpaceX = tickSpace[0]
self.tickSpaceY = tickSpace[1]
# position of axes in 0-1
self.posAxes = [self.border/self.figWidth,
self.border/self.figHeight,
1 - (2*self.border + self.tickSpaceY)/self.figWidth,
1 - (2*self.border + self.tickSpaceX)/self.figHeight
]
# offset for posAxes
self.posTickSpace = [self.tickSpaceY/self.figWidth,
self.tickSpaceX/self.figHeight,
0, 0]
# set axes
self.figure.add_axes(self.posAxes)
self.set_axes(axesOri)
# set adjustable and autoscale
self.gca().set_adjustable("datalim")
#self.gca().set_autoscale_on(False)
# set tick formatter
import matplotlib.ticker as ticker
formatter = ticker.ScalarFormatter(useOffset=False)
formatter.set_powerlimits((-4,10))
self.gca().xaxis.set_major_formatter(formatter)
self.gca().yaxis.set_major_formatter(formatter)
# scale
self.figScale = None
if figScale is not None:
self.set_scale_ratio(figScale)
self.figScale = figScale
# sets logo
self.logo = self.figure.text(1 - self.border/2/self.figWidth,
self.border/2/self.figHeight,
">--Gizela-%s-->" % gizela.__version__,
fontsize=6,
verticalalignment="center",
horizontalalignment="right")
#transform=self.figure.tranFigure)
# set scale bar
#if "scaleBar" in self.config and figScale is not None:
# from matplotlib.patches import Rectangle
# if self.config["scaleBar"]["visible"] == "on":
# # plot scale bar
# width = self.config["scaleBar"]["width"]/100
# height = self.config["scaleBar"]["height"]/100
# offset = self.config["scaleBar"]["offset"]/100
# width_m = width * self.figWidth / figScale
# #: width of bar in meters in real
# exp = 10**math.round(math.log10(width_m))
# widthi_m = width_m/4.0
# xy = [1 - width - offset, offset]
# widthi = width/4
# facecolor="white"
# trnax = self.gca().transAxes
# for i in xrange(4):
# self.gca().add_patch(Rectangle(xy=xy,
# width=widthi,
# height=height,
# transform=trnax,
# facecolor=facecolor))
# xy[0] += widthi
# if facecolor is "white":
# facecolor="black"
# else:
# facecolor="white"
def update_(self, axesOri=None, figScale=None):
"updates properties of figure"
if axesOri is not None:
self.set_axes(axesOri=axesOri)
if figScale is not None:
self.set_scale_ratio(figScale)
def set_axes(self, axesOri="en", xLabel="X", yLabel="Y"):
"""
        axesOri: orientation of axes: ne, nw, se, sw
                 en, wn, es, ws
        sets direction and position of ticks and their properties
sets _swapXY for drawing
sets position of axes object and posAxes attribute
"""
#import sys
#print >>sys.stderr, "set_axes", axesOri
ax = self.gca()
self._axesOri = axesOri
if axesOri == "ne" or axesOri == "en":
self.posTickSpace[0] = self.tickSpaceY/self.figWidth
self.posTickSpace[1] = self.tickSpaceX/self.figHeight
elif axesOri == "sw" or axesOri == "ws":
# direction of axes
if not ax.xaxis_inverted():
ax.invert_xaxis()
if not ax.yaxis_inverted():
ax.invert_yaxis()
# ticks position
for tick in ax.xaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
for tick in ax.yaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
# position of axes
self.posTickSpace[0] = 0
self.posTickSpace[1] = 0
elif axesOri == "es" or axesOri == "se":
# direction of axes
if not ax.yaxis_inverted():
ax.invert_yaxis()
# ticks position
for tick in ax.xaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
# position of axes
self.posTickSpace[0] = self.tickSpaceY/self.figWidth
self.posTickSpace[1] = 0
elif axesOri == "wn" or axesOri == "nw":
# direction of axes
if not ax.xaxis_inverted():
ax.invert_xaxis()
# ticks position
for tick in ax.yaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
# position of axes
self.posTickSpace[0] = 0
self.posTickSpace[1] = self.tickSpaceX/self.figHeight
else:
raise FigureLayoutBaseError, "Unknown axes orientation %s" % axesOri
# set axes position
self._set_axes_position()
# set ticks label properties
for l in ax.xaxis.get_ticklabels():
if "axes" in self.config and "tickFontSize" in self.config["axes"]:
l.set_fontsize(self.config["axes"]["tickFontSize"])
else:
l.set_fontsize(6)
for l in ax.yaxis.get_ticklabels():
if "axes" in self.config and "tickFontSize" in self.config["axes"]:
l.set_fontsize(self.config["axes"]["tickFontSize"])
else:
l.set_fontsize(6)
# set swapXY
if axesOri == "ne" or axesOri == "nw" \
or axesOri == "se" or axesOri == "sw":
self._swapXY = True
else:
self._swapXY = False
#sets label of x-axis
if axesOri=="en" or axesOri=="wn" or axesOri=="es" or axesOri=="ws":
ax.xaxis.set_label_text(xLabel)
if axesOri=="es" or axesOri=="ws":
ax.xaxis.set_label_position("top")
else:
ax.xaxis.set_label_position("bottom")
else:
ax.yaxis.set_label_text(xLabel)
if axesOri=="se" or axesOri=="ne":
ax.yaxis.set_label_position("left")
else:
ax.yaxis.set_label_position("right")
#sets label of y axis
if axesOri=="ne" or axesOri=="nw" or axesOri=="se" or axesOri=="sw":
ax.xaxis.set_label_text(yLabel)
if axesOri=="se" or axesOri=="sw":
ax.xaxis.set_label_position("top")
else:
ax.xaxis.set_label_position("bottom")
else:
ax.yaxis.set_label_text(yLabel)
if axesOri=="es" or axesOri=="en":
ax.yaxis.set_label_position("left")
else:
ax.yaxis.set_label_position("right")
def _set_axes_position(self):
self.gca().set_position([i+j for i, j in zip(self.posAxes,
self.posTickSpace)])
def get_axes_ori(self): return self._axesOri
def gca(self):
"returns current axes"
return self.figure.gca()
def plot_xy(self, x, y):
"plots data to axes with respect to axes orientation"
if type(x) != list and type(x) != tuple:
x = [x]
if type(y) != list and type(y) != tuple:
y = [y]
if self._swapXY:
return self.gca().plot(y, x)
else:
return self.gca().plot(x, y)
def set_aspect_equal(self):
"sets equal aspect ratio for axes"
self.gca().set_aspect("equal")
def is_swap_xy(self):
return self._swapXY
def get_scale_ratio(self):
"""
returns scale ratio for x and y axis
        assumes that data are in meters
"""
xmin, xmax = self.gca().get_xbound()
ymin, ymax = self.gca().get_ybound()
return (self.posAxes[2]*self.figWidth/1000)/(xmax - xmin),\
(self.posAxes[3]*self.figHeight/1000)/(ymax - ymin)
def get_scale_ratio_x(self):
return self.get_scale_ratio()[0]
def get_scale_ratio_y(self):
return self.get_scale_ratio()[1]
def set_scale_ratio(self, ratio):
"set scale ration of both x and y axes"
self.set_scale_ratio_x(ratio)
self.set_scale_ratio_y(ratio)
def set_scale_ratio_x(self, ratio):
"""
sets scale ratio of x axis
manipulating xlim properties of axes object
"""
dx_ = self.posAxes[2]*self.figWidth/1000/ratio
xmin, xmax = self.gca().get_xbound()
dx = xmax - xmin
ddx = dx_ - dx
xmin, xmax = xmin - ddx/2, xmax + ddx/2
self.gca().set_xbound(xmin, xmax)
def set_scale_ratio_y(self, ratio):
"""
sets scale ratio of y axis
manipulating ylim properties of axes object
"""
dy_ = self.posAxes[3]*self.figHeight/1000/ratio
ymin, ymax = self.gca().get_ybound()
dy = ymax - ymin
ddy = dy_ - dy
ymin, ymax = ymin - ddy/2, ymax + ddy/2
self.gca().set_ybound(ymin, ymax)
@staticmethod
def get_scale_ratio_string(ratio):
if ratio > 1.0:
if round(ratio) - ratio > 1e-5:
return "%.5f : 1" % ratio
else:
return "%.0f : 1" % ratio
else:
ratio = 1.0/ratio
if round(ratio) - ratio > 1e-5:
return "1 : %.5f" % ratio
else:
return "1 : %.0f" % ratio
def get_scale_ratio_string_min(self):
"returns string with min scale ratio"
return self.get_scale_ratio_string(min(self.get_scale_ratio()))
def get_scale_ratio_string_y(self):
"returns scale ratio of y axis - vertical axis"
return self.get_scale_ratio_string(self.get_scale_ratio()[1])
def show_(self, mainloop=True):
"""
show figure
"""
if self.figScale is not None:
self.set_scale_ratio(self.figScale)
import matplotlib.pyplot
matplotlib.pyplot.show(mainloop)
def set_free_space(self, border=10, equal=False):
"""
border: white space around drawing in percents
equal: equal border for x and y direction?
"""
xmin, xmax = self.gca().get_xlim()
ymin, ymax = self.gca().get_ylim()
dx = xmax - xmin
dy = ymax - ymin
dxp = dx * border/100
dyp = dy * border/100
if equal:
dxyp = (dxp + dyp)/2 # mean value
dxp = dxyp
dyp = dxyp
self.gca().set_xlim((xmin - dxp, xmax + dxp))
self.gca().set_ylim((ymin - dyp, ymax + dyp))
def save_as(self, fileName="figure"):
"saves figure as image"
if self.figScale is not None:
self.set_scale_ratio(self.figScale)
dpi = self.config["figure"]["dpi"]
# set image size
sizem = self.config["figure"]["size"]
sizei = [float(i) / 25.4 for i in sizem.split(",")]
self.figure.set_size_inches(sizei)
import sys
print >>sys.stderr, "Figure name:", fileName,\
"size (mm):", sizem, "DPI:", dpi
#self.figure.set_dpi(dpi)
self.figure.savefig(fileName, dpi=dpi)
def parse_config_file(self, name):
"parser for configuration file"
import ConfigParser, os, sys
configParser = ConfigParser.SafeConfigParser()
configParser.optionxform = str # to make options case sensitive
defaults = os.path.sep.join(["gizela", "pyplot", "default.cfg"])
path = [p + os.path.sep + defaults for p in sys.path]
if name is not None:
path.extend([p + os.path.sep + name for p in sys.path])
path.append(name)
readed = configParser.read(path)
#os.path.expanduser("~/" + name),
#"./" + name])
print >>sys.stderr, \
"Figure configuration file(s) readed: %s" % ", ".join(readed)
self.config = {}
for sec in configParser.sections():
self.config[sec] = {}
for p,v in configParser.items(sec):
try:
v=float(v)
except:
pass
self.config[sec][p] = v
def get_config_section(self, section):
"returns configuration section items in dictionary"
return self.config[section]
def set_style(self, style, artist):
"""
sets properties of artist according to
configuration file.
styleType: the name of section in config file or
dictionary with properties
artist: instance of graphic object (line, text, ...)
"""
if type(style) is str:
style = self.get_config_section(style)
for p, v in style.items():
fun = getattr(artist, "set_" + p)
fun(v)
def get_style_dict(self, style):
"returns style dictionary of properties"
return self.get_config_section(style)
def get_label_tran(self):
"return transformation for text labels"
from matplotlib.transforms import offset_copy
offset = self.get_config_section("pointLabelOffset")
return offset_copy(self.gca().transData, self.figure,
offset["x"], offset["y"],
units="points")
def plot_point_dot(self, point):
PlotPoint.plot_point_dot(self, point, style="pointDotStyle")
def plot_point_fix_dot(self, point):
PlotPoint.plot_point_dot(self, point, style="pointFixDotStyle")
def plot_point_con_dot(self, point):
PlotPoint.plot_point_dot(self, point, style="pointConDotStyle")
def plot_point_adj_dot(self, point):
PlotPoint.plot_point_dot(self, point, style="pointAdjDotStyle")
def plot_point_label(self, point):
PlotPoint.plot_point_label(self, point, style="pointLabelStyle")
def plot_point_fix_label(self, point):
PlotPoint.plot_point_label(self, point, style="pointFixLabelStyle")
def plot_point_con_label(self, point):
PlotPoint.plot_point_label(self, point, style="pointConLabelStyle")
def plot_point_adj_label(self, point):
PlotPoint.plot_point_label(self, point, style="pointAdjLabelStyle")
def plot_point_x(self, point, x):
PlotPoint.plot_point_x(self, point, x, style="pointDotStyle")
def plot_point_y(self, point, x):
PlotPoint.plot_point_y(self, point, x, style="pointDotStyle")
def plot_point_z(self, point, x):
PlotPoint.plot_point_z(self, point, x, style="pointDotStyle")
#def plot_point_error_ellipse(self, point): pass
#def plot_point_x_stdev(self, point, x): pass
#def plot_point_y_stdev(self, point, x): pass
#def plot_point_z_stdev(self, point, x): pass
#def plot_point_error_z(self, point): pass
def plot_scale_bar(self): pass
if __name__ == "__main__":
fig = FigureLayoutBase(figScale=1e-5)
fig.set_axes("sw")
fig.plot_xy([1e3, 1.001e3, 1.001e3, 1e3, 1e3],
[0.5e3, 0.5e3, 0.501e3, 0.501e3, 0.5e3])
scalex, scaley = fig.get_scale_ratio()
print 1/scalex, 1/scaley
fig.set_aspect_equal()
scalex, scaley = fig.get_scale_ratio()
print 1/scalex, 1/scaley
fig.show_()
#fig.save_as()
| gpl-3.0 |
behzadnouri/scipy | scipy/integrate/_bvp.py | 61 | 39966 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d yb_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0   5
1 1 2 2 0 0 0 0   5
0 0 1 1 2 2 0 0   5
0 0 1 1 2 2 0 0   5
0 0 0 0 1 1 2 2   5
0 0 0 0 1 1 2 2   5

3 3 0 0 0 0 4 4   6
3 3 0 0 0 0 4 4   6
3 3 0 0 0 0 4 4   6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
Such a method is classified as belonging to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
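# (y_middle is the C1 cubic interpolant on [x_i, x_{i+1}] evaluated at the
# midpoint: (y_i + y_{i+1}) / 2 + h * (f_i - f_{i+1}) / 8.)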
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations: when a full Newton step is accepted, the same Jacobian is reused
for the next iteration (the approach of BVP_SOLVER, see the code below).
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for a problem with the
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
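# (Explicitly: r_middle = 1.5 * col_res / h, and requiring
# r_middle < 5e-2 * bvp_tol * (1 + |f_middle|) rearranges to
# col_res < 2/3 * h * 5e-2 * bvp_tol * (1 + |f_middle|), i.e. tol_r above.)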
# We also need to control residuals of the boundary conditions. But it
# seems that they become very small eventually as the solver progresses,
# i.e. the tolerance for the BC is not very important. We set it 1.5 orders
# lower than the BVP tolerance as well.
tol_bc = 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluations and factorizations, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(bc_res < tol_bc)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Total nodes", "Nodes added"))
def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
print("{:^15}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as the square root of
the normalized integrals of the squared relative residuals over each
interval. Integrals are estimated using 5-point Lobatto quadrature [1]_;
we use the fact that residuals at the mesh nodes are identically zero.
In [2]_ the integrals are not normalized by interval lengths, which gives
a higher rate of convergence of the residuals by a factor of h**0.5.
I chose to do such normalization for ease of interpretation of the return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
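# 5-point Lobatto quadrature on [-1, 1] uses nodes 0, +-sqrt(3/7), +-1 with
# weights 32/45, 49/90 (twice) and 1/10 (twice). The endpoint terms drop out
# because the residuals vanish at the mesh nodes, and the leading 0.5 is the
# h/2 Jacobian of the quadrature divided by the interval length h.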
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Node removal logic is not implemented, as its impact on the solver is
presumably negligible. So only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Intervals in which to insert 1 new node in the middle.
insert_2 : ndarray
Intervals in which to insert 2 new nodes, dividing the interval
into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because the np.insert implementation apparently varies between numpy
# versions, we use a simple and reliable approach with sorting.
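# For example (illustration only): with x = [0, 1, 2, 3], insert_1 = [1] and
# insert_2 = [2], the result is [0, 1, 1.5, 2, 7/3, 8/3, 3] -- one midpoint
# in interval 1 and two equally spaced interior nodes in interval 2.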
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(ya, yb, p):
return np.asarray(bc(ya, yb, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is an n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined there must be n + k boundary conditions, i.e. bc must be an
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict boundary conditions. See [2]_ for an explanation of how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4th order collocation algorithm with residual
control similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths, so their definition differs from the one used here
by a factor of h**0.5 (h is an interval length).
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take advantage of having the
solution in a spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and the guess for y. We aim to find the solution for
k = 2 * pi; to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, m,
nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, m, nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
else:
status = 0
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}, "
"maximum relative residual {:.2e}."
.format(iteration, x.shape[0], max_rms_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}, "
"maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}, maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
Cysu/dlearn | docs/conf.py | 1 | 8603 | # -*- coding: utf-8 -*-
#
# dlearn documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 3 15:41:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
# Mock some third-party modules
mock_modules = ['numpy', 'theano', 'theano.tensor',
'matplotlib', 'matplotlib.pyplot', 'mpl_toolkits.axes_grid1',
'skimage', 'skimage.transform', 'skimage.color',
'sklearn', 'sklearn.preprocessing']
for m in mock_modules:
sys.modules[m] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dlearn'
copyright = u'2014, Tong Xiao'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev'
# The full version, including alpha/beta/rc tags.
release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'dlearn.tex', u'dlearn Documentation',
u'Tong Xiao', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dlearn', u'dlearn Documentation',
[u'Tong Xiao'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dlearn', u'dlearn Documentation', u'Tong Xiao',
'dlearn', 'One line description of project.', 'Miscellaneous')
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
thekatiebr/NEMO_II | NEMO.py | 1 | 14784 | #!/usr/bin/env python
from KnowledgeBase import KnowledgeBase
from Classifiers import ML_Controller, KnowledgeIntegrator
from collections import deque
from sklearn.model_selection import train_test_split
import pandas
import numpy
from sklearn.utils import shuffle
import MySQLdb
import threading
import sys
import os
import time
import json
#one stop event, pass in the queue and number of seconds to spend optimizing
def optimizeAlgorithmWorker(ml, stp):
while not stp.is_set():
ml.optimizeAlgorithm()
def optimizeWorker(queue, stp, secs):
while not stp.is_set():
task = queue.popleft()
#print "Optimizing " + task.getID()
opt_stp = threading.Event()
thrd = threading.Thread(target=optimizeAlgorithmWorker, args=(task, opt_stp))
thrd.start()
time.sleep(secs)
opt_stp.set()
thrd.join()
queue.append(task)
class NEMO:
def __init__(self, filename):
self.kb = KnowledgeBase.KnowledgeBase(filename)
self.ml = [] #list of machine learners
self.secs = 10
self.queue = deque()
self.optimization_thread = None
self.stop_event = None
self.checkForCurrentModels()
self.checkForOptimizingModels()
with open(filename) as fd:
json_data = json.load(fd)
info = json_data['KNOWLEDGE_INTEGRATOR']
self.stacking_classifier = info["STACKER"]
self.other_predictions = info["OTHER_PREDICTIONS"] if info['OTHER_PREDICTIONS'] != "None" else None
def findAlgorithmBasedOnID(self, id):
for model in self.ml:
if id == model.getID():
return model
def verifyID(self, id):
stmt = "select algorithm_id from ModelRepository"
self.kb.executeQuery(stmt)
#self.kb.cursor.execute(stmt)
ids = self.kb.fetchAll()
return (id,) in ids
def getAlgorithmType(self, id):
#assumes id has already been verified
stmt = "select algorithm_name from ModelRepository where algorithm_id = " + id
#print stmt
self.kb.executeQuery(stmt)
types = self.kb.fetchOne()
#print types
return types[0]
#same model, different id
def createModelBasedONID(self):
#self.printModelInformation()
id = raw_input("Enter ID Here --> ")
if self.verifyID(id):
type = self.getAlgorithmType(id)
new_ml = ML_Controller.ML_Controller(self.kb, type)
new_ml.createModel(id)
self.kb.updateDatabaseWithModel(new_ml.algorithm)
self.kb.addCurrentModel(new_ml.algorithm)
new_ml.runAlgorithm()
new_ml.updateDatabaseWithResults()
self.ml.append(new_ml)
else:
print "ID does not exist in Model Repository"
#makes a copy w/ same id
def copyML(self):
#self.printModelInformation()
this_id = raw_input("Enter ID Here --> ")
print this_id
if self.verifyID(this_id):
if self.findAlgorithmBasedOnID(this_id) is not None:
print "This model has already been created. . . "
else:
self.copy(this_id)
else:
print "ID does not exist in Model Repository"
def copy(self, this_id):
algorithm_type = ""
try:
algorithm_type = self.getAlgorithmType(this_id)
except:
this_id = this_id + "*"
algorithm_type = self.getAlgorithmType(this_id)
new_ml = ML_Controller.ML_Controller(self.kb, algorithm_type)
new_ml.copyModel(this_id)
#self.kb.removeModelFromRepository(new_ml.algorithm)
self.kb.updateDatabaseWithModel(new_ml.algorithm)
self.kb.addCurrentModel(new_ml.algorithm)
new_ml.runAlgorithm()
new_ml.updateDatabaseWithResults()
self.ml.append(new_ml)
return new_ml
def setupNewML(self):
models = ['Neural Network', 'Decision Tree', 'SVM', 'Random Forest']
possible_choices = range(1, len(models)+1)
ch_strs = map(str, possible_choices)
input = ""
while input not in ch_strs:
print "Pick A Model Type"
for i in range(0, len(models)):
print ch_strs[i] + ". " + models[i]
input = raw_input("--> ")
input = models[int(input)-1]
self.createML(input)
def createML(self, input):
new_ml = ML_Controller.ML_Controller(self.kb, input)
new_ml.createModel()
self.kb.updateDatabaseWithModel(new_ml.algorithm)
self.kb.addCurrentModel(new_ml.algorithm)
new_ml.runAlgorithm()
new_ml.updateDatabaseWithResults()
self.ml.append(new_ml)
return new_ml.getID()
def runAlgorithm(self, id=None):
if id is None:
id = raw_input("Enter ID of Model --> ")
model = self.findAlgorithmBasedOnID(id)
if model is not None:
res = model.runAlgorithm()
model.updateDatabaseWithResults()
return res
else:
print "Model with ID " + id + " does not exist"
############################################################################################################
def optimizeAllModels(self):
for model in self.ml:
self.optimizeTask(model.getID())
def optimizeTask(self, id):
# retrieve model from id
model = self.findAlgorithmBasedOnID(id)
if model is not None and not model.isCurrentlyOptimizing: # check to see if optimization flag is true
print "Adding Model"
# add to currently optimizing table
self.addToCurrentlyOptimizingTable(id)
# set optimization flag to true
model.isCurrentlyOptimizing = True
# enqueue to optimization queue
self.queue.append(model)
else:
print "Error adding model with ID: " + id
def startOptimization(self):
# init thread with optimize worker
if self.queue is not None:
if len(self.queue) > 0:
self.stop_event = threading.Event()
self.optimization_thread = threading.Thread(target=optimizeWorker, args=(self.queue, self.stop_event, self.secs))
self.optimization_thread.start()
def pauseOptimzation(self):
# issue stop event and stop thread
if self.stop_event is not None and self.optimization_thread is not None:
self.stop_event.set()
self.optimization_thread.join()
def cancelOptimization(self):
# issue stop event and stop thread
self.pauseOptimzation()
# dequeue through queue setting flags to false
self.queue.clear()
for m in self.ml:
m.isCurrentlyOptimizing = False
self.removeFromCurrentlyOptimizingTable(m.getID())
def cancelSingleOptimizationTask(self, id):
self.pauseOptimzation()
to_remove = None
for m in self.queue:
if m.getID() == id:
to_remove = m
if to_remove is not None:
self.queue.remove(to_remove)
self.removeFromCurrentlyOptimizingTable(id)
self.startOptimization()
def printInformationOnCurrentlyOptimizingModels(self):
stmt = "select algorithm_id from CurrentlyOptimizingModels"
self.kb.executeQuery(stmt)
row = self.kb.fetchOne()
current_id = ""
while row != None:
id = row[0]
self.printModelInformation(id)
row = self.kb.fetchOne()
def removeFromCurrentlyOptimizingTable(self,id):
stmt = "select algorithm_id from CurrentlyOptimizingModels"
self.kb.executeQuery(stmt)
#self.kb.cursor.execute(stmt)
ids = self.kb.fetchAll()
if (id,) in ids:
stmt = "delete from CurrentlyOptimizingModels where algorithm_id = " + id
self.kb.executeQuery(stmt)
def addToCurrentlyOptimizingTable(self, id):
try:
stmt = "insert into CurrentlyOptimizingModels(algorithm_id) values (%s)"
self.kb.executeQuery(stmt,(id,))
except (MySQLdb.IntegrityError):
print "Algorithm is already in queue for optimization"
############################################################################################################
def printAlgorithmResults(self):
self.pauseOptimzation()
stmt = "select * from AlgorithmResults"
self.kb.executeQuery(stmt)
#self.kb.cursor.execute(stmt)
print "Algorithm ID\t\t\tAlgorithm Name\t\t\tAccuracy\t\t\tPrecision\t\t\tRecall\t\t\tF1 Score\t\t\t"
row = self.kb.fetchOne()
while row != None:
print "%s\t\t\t%s\t\t\t%s\t\t\t%s\t\t\t%s\t\t\t%s\t\t\t" % (row[0], row[1], row[2], row[3], row[4], row[5])
row = self.kb.fetchOne()
#self.startOptimization()
def printModelInformation(self, id=None):
self.pauseOptimzation()
if id is None:
stmt = "select * from ModelRepository"
else:
stmt = "select * from ModelRepository where algorithm_id = " + id
self.kb.executeQuery(stmt)
#self.kb.cursor.execute(stmt)
row = self.kb.fetchOne()
current_id = ""
while row != None:
#print row
if current_id != row[0]:
print "\nCurrent Algorithm ID: " + row[0] + "\nAlgorithm Type: " + row[1]
current_id = row[0]
val = row[3] if row[3] is not None else "None"
print row[2] + " = " + val
row = self.kb.fetchOne()
print "\nNo Model Information to Show"
#self.startOptimization()
def printCurrentModelInformation(self):
for model in self.ml:
self.printModelInformation(model.getID())
def checkForCurrentModels(self):
#self.pauseOptimzation()
stmt = "select algorithm_id from CurrentModel"
self.kb.executeQuery(stmt)
#self.kb.cursor.execute(stmt)
row = self.kb.fetchOne()
i = 0
while row is not None:
self.copy(row[0])
row = self.kb.fetchOne()
#self.startOptimization()
def checkForOptimizingModels(self):
stmt = "select * from CurrentlyOptimizingModels"
self.kb.executeQuery(stmt)
row = self.kb.fetchOne()
while row is not None:
id = row[0] #get id
#print id
mdl = self.findAlgorithmBasedOnID(id)
if mdl is None:
mdl = self.copy(id)
print "created model"
# set optimization flag to true
mdl.isCurrentlyOptimizing = True
# enqueue to optimization queue
self.queue.append(mdl)
row = self.kb.fetchOne()
print "Finished checking for models"
self.menu()
self.startOptimization()
def runKnowledgeIntegrator(self):
self.pauseOptimzation()
try:
ki = KnowledgeIntegrator.KnowledgeIntegrator(self.kb, self.ml, self.stacking_classifier, self.other_predictions)
data = self.kb.getData()
shuffled_data = shuffle(data)
splits = numpy.array_split(shuffled_data, 10)
ki_res = ki.testKI(splits,10,0)
self.kb.updateDatabaseWithResults(ki)
print "Run KnowledgeIntegrator"
except:
print "Error running Knowledge Integrator. Please ensure models are created and try again"
def splitIntoFolds(self, data, k, seed):
shuffled_data = shuffle(data, random_state=seed)
#print shuffled_data
folds = []
num_in_folds = len(data) / k
start = 0
end = num_in_folds - 1
for i in range(0,k):
fold = shuffled_data.iloc[start:end]
start = end
end = end + num_in_folds - 1
#print fold
folds.append(self.splitIntoXY(fold))
return folds
def getTestTraining(self, curr, others):
xtest = curr[0]
ytest = curr[1]
xtrainsets = []
ytrainsets = []
for curr in others:
xtrainsets.append(pandas.DataFrame(curr[0]))
ytrainsets.append(pandas.DataFrame(curr[1]))
xtrain = pandas.concat(xtrainsets)
ytrain = pandas.concat(ytrainsets)
return xtrain, xtest, ytrain, ytest
def splitIntoXY(self, data):
#print data
#print(data.columns.tolist())
y = data[self.kb.Y] #need to change to reflect varying data...
#print y
x = data[self.kb.X]
#print x
return (x,y)
def menu(self):
#TODO
#1. Create New Model\n
#2. Recreate Model Based on ID \n
#3. Create a copy of a model based on ID\n
#3. Run Model\n => provide ID
#4. Add model to optimization queue\n => list all current models in queue with current optimization status => have user pick which to add to queue
#5. Optimize all models => init optimization threads
#6. Output Model Results (Any current optimization task will be halted and restarted)\n
#7. View Information on All Model(s)\n => pause all models optimization, print information in modelrepository table
#8. View Information on Current Model(s)\n => pause all models optimization, print information in current model table where id = current
#9. Cancel Selected Optimization Task => Print list of models undergoing optimization => Input ID => Cancel Optimization
#9. Cancel All Optimization Tasks\n => totally cancel all optimization tasks, optimization flags go false
#10. Quit NEMO\n--> "
options = ['Create New Model', 'Create New Model Based on ID', 'Create a Copy of a Model Based on ID', 'Run Model', 'Run Knowledge Integrator', 'Add Model to Optimization Queue', 'Optimize All Models',
'Output All Model Results (Any current optimization task will be halted and restarted)', 'View Information on All Models (Any current optimization task will be halted and restarted)',
'View Information on Current Models (Any current optimization task will be halted and restarted)', 'View Models in Optimization Queue (Any current optimization task will be halted and restarted)',
'Cancel Selected Optimization Task', 'Cancel All Optimization Tasks', 'Quit NEMO']
possible_choices = range(1, len(options)+1)
ch_strs = map(str, possible_choices)
input = ""
while input not in ch_strs:
print "Main Menu"
for i in range(0, len(options)):
print ch_strs[i] + ". " + options[i]
input = raw_input("--> ")
choice = options[int(input)-1]
self.processChoice(choice)
def processChoice(self, choice):
if choice == 'Create New Model':
self.setupNewML()
elif choice == 'Create New Model Based on ID':
self.createModelBasedONID()
elif choice == 'Create a Copy of a Model Based on ID':
self.copyML()
elif choice == 'Run Model':
self.runAlgorithm()
elif choice == 'Add Model to Optimization Queue':
id = raw_input("Enter ID --> ")
self.optimizeTask(id)
self.startOptimization()
elif choice == 'Optimize All Models':
self.optimizeAllModels()
self.startOptimization()
elif choice == 'Output All Model Results (Any current optimization task will be halted and restarted)':
self.printAlgorithmResults()
elif choice == 'View Information on All Models (Any current optimization task will be halted and restarted)':
self.printModelInformation()
elif choice == 'View Information on Current Models (Any current optimization task will be halted and restarted)':
self.printCurrentModelInformation()
elif choice == 'Cancel All Optimization Tasks':
self.cancelOptimization()
elif choice == 'Cancel Selected Optimization Task':
id = raw_input("Enter ID --> ")
self.cancelSingleOptimizationTask(id)
elif choice == 'View Models in Optimization Queue (Any current optimization task will be halted and restarted)':
self.printInformationOnCurrentlyOptimizingModels()
elif choice == 'Run Knowledge Integrator':
#self.runKnowledgeIntegrator()
self.runKnowledgeIntegrator()
#print "Run KnowledgeIntegrator"
else:
self.cancelOptimization()
sys.exit()
def main():
pid = str(os.getpid())
dir = os.path.dirname(os.path.realpath(__file__))
print dir
pidfile = "tmp/NEMO.pid"
if os.path.isfile(pidfile):
print "%s already exists, exiting" % pidfile
sys.exit()
file(pidfile, 'w').write(pid)
try:
run(dir)
finally:
os.unlink(pidfile)
def run(dir=None):
if dir is not None:
nemo = NEMO(dir + "/config/config.json")
else:
nemo = NEMO("config/config.json")
while True:
nemo.menu()
if __name__ == "__main__":
main()
| apache-2.0 |
gclenaghan/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 25 | 2004 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
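# Editorial sketch (not part of the original exercise skeleton): one possible
# way to complete the TASKs above so the reporting code below runs end to end.
# The analyzer and classifier choices here are illustrative, not the canonical
# solution.
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)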
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
droundy/deft | papers/thesis-scheirer/final/RG_fn.py | 2 | 14458 | from __future__ import division
import scipy as sp
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
import pylab as plt
import matplotlib
import RG
import SW
import numpy as np
import time
import integrate
import os
import sys
###############################################################################################
# Author: Ryan Scheirer #
# Email: [email protected] #
# Date: February 2016 #
#
# Uses fsolve to find the common tangent of the free energy density vs number density... #
# ...this then constructs the temp vs filling fraction liquid-vapor coexistence plot, total...#
# ...grand free energy per volume, and many more fun plots. #
###############################################################################################
if len(sys.argv) < 2:
print("Usage: %s TEMPERATURE" % sys.argv[0])
exit(1)
temp=float(sys.argv[1])
################################## START INITIALIZATION #######################################
# #
# #
sigma = 2 #Sphere diameter
k_B = 1 #Boltzman's constant
numdata=30 #Number of numdensity data points
max_fillingfraction_handled = 0.55
sphere_volume = (sigma**3*np.pi/6)
numdensity = plt.linspace(0.0008,max_fillingfraction_handled/sphere_volume,numdata) #number density
num_init = 1000
num_right = 200
num_left = 200
num_mid = 1000
datadir = 'data13'
try:
os.mkdir(datadir)
except:
pass
fbase = datadir+'/fit_T%.5f_f'%temp
print(temp)
def RG_first_pass(T,n,i):
fnaught = RG.SWfid(T,n) + RG.SWfhs(T,n) + RG.a2(n)/k_B/T*n # SW (and Hughes) a2/kT is the same as Forte's f2
f = fnaught
return f + RG_dfi(n)
def RG_later_pass(n):
f = f01_ext(n)
return f + RG_dfi(n)
def RG_dfi(n):
maxn = (max_fillingfraction_handled+0.0001)/sphere_volume
# warning: Forte defines x as a density, we define it
# as a dimensionless quantity that scales the density.
maxx = np.minimum(1,maxn/n-1)
if abs(maxx) < 1e-42:
print 'maxx is zero'
print 'maxx is zero'
print 'maxx is zero'
print 'maxx is zero'
print 'maxx is zero'
T=temp
# eqn (5) from Forte 2011:
IDvalue = ID2(n, maxx)
IDvalueStar = ID2star(n, maxx)
return -k_B*T*(IDvalue-IDvalueStar)/RG.VD(fn) # eqn (7), Forte 2011
def firstPass():
global f01interp
f01interp=lambda n: RG.fiterative(temp,n,0)
data=[]
t = time.time()
lastprint = t
for i in range(numdata):
n = numdensity[i]
print "%d of %d: "%(i,numdata),
free_energy = RG_first_pass(temp,n,1)
data.append([n,free_energy])
elapsed = time.time() - t
print(elapsed)
np.savetxt(fsave,data)
def laterPass():
f01_load()
data=[]
f02 = []
for i in range(numdata):
print "%d of %d: "%(i,numdata),
n = numdensity[i]
free_energy = RG_later_pass(n)
f02.append(free_energy)
data.append([n,free_energy])
np.savetxt(fsave,data)
fn=1
while os.path.isfile(fbase+'%d.out'%fn):
fn+=1
fload = fbase + '%d.out'%(fn-1)
fsave = fbase + '%d.out'%fn
print('fn:',fn)
# #
# #
############################### END INITIALIATION #############################################
########################### START PLOTTING STUFF ##############################################
# #
# #
def f0(n):
return RG.fiterative(temp,n,0)
def f01_load():
global f01interp
f01data=np.loadtxt(fload)
f01 = [f01data[i][1] for i in range(0,len(f01data))]
numdensity = [f01data[i][0] for i in range(0,len(f01data))]
f01interp = interp1d(numdensity,f01,kind='cubic')
##integrand_xs = []
##integrand_args = []
def ID2(n, maxx):
## This is $I_D$ from Forte's paper.
## global integrandIDlistx
## global integrandIDlistarg
maxpower = -1e99
dx = maxx/num_init
xpoints = np.arange(dx/2,maxx, dx)
kmax=0
for k in range(len(xpoints)):
if maxpower<onlyPower(n,xpoints[k],fn):
maxpower=onlyPower(n,xpoints[k],fn)
kmax=k
#########FIND LEFT SIDE
x_left = xpoints[kmax]
k_left = kmax
while k_left>0 and integrand_ID2(n,maxpower,x_left)>0.1:
k_left -= 1
x_left = xpoints[k_left]
#########FIND RIGHT SIDE
x_right = xpoints[kmax]
k_right = kmax
while k_right<len(xpoints)-1 and integrand_ID2(n,maxpower,x_right)>0.1:
k_right += 1
x_right = xpoints[k_right]
integral_left = 0
dx_left = x_left/num_left
xpoints_left = np.arange(dx_left/2,x_left,dx_left)
max_power_left = -1e99
for k in range(len(xpoints_left)):
max_power_left = max(max_power_left,onlyPower(n,xpoints_left[k],fn))
integral_right = 0
dx_right = (maxx-x_right)/num_right
xpoints_right = np.arange(x_right+dx_right/2,maxx,dx_right)
max_power_right = -1e99
for k in range(len(xpoints_right)):
max_power_right = max(max_power_right,onlyPower(n,xpoints_right[k],fn))
integral_mid = 0
dx_mid = (x_right-x_left)/num_mid
xpoints_mid = np.arange(x_left+dx_mid/2,x_right,dx_mid)
max_power_mid = -1e99
for k in range(len(xpoints_mid)):
max_power_mid = max(max_power_mid,onlyPower(n,xpoints_mid[k],fn))
if max_power_left > max_power_mid:
x_left = 0
integral_left = 0
else:
integral_left += integrate.midpoint(lambda x: integrand_ID2(n,max_power_left,x),0,x_left,num_left)*n
if max_power_right > max_power_mid:
x_right = maxx
integral_right = 0
else:
integral_right += integrate.midpoint(lambda x: integrand_ID2(n,max_power_right,x),x_right,maxx,num_right)*n
if x_left != 0 and x_right != maxx:
integral_mid += integrate.midpoint(lambda x: integrand_ID2(n,max_power_mid,x),x_left,x_right,num_mid)*n
else:
dx_mid = (x_right-x_left)/num_mid
xpoints_mid = np.arange(x_left+dx_mid/2,x_right,dx_mid)
max_power_mid = -1e99
for k in range(len(xpoints_mid)):
max_power_mid = max(max_power_mid,onlyPower(n,xpoints_mid[k],fn))
integral_mid += integrate.midpoint(lambda x: integrand_ID2(n,max_power_mid,x),x_left,x_right,num_mid)*n
if integral_left == 0 and integral_right == 0:
print "TTm=%.2E \r"%(max_power_mid),
return np.log(integral_mid)+max_power_mid
if integral_left != 0 and integral_right == 0:
print "FTlm=%.2E \r"%(max_power_left-max_power_mid),
return np.log(integral_left*np.exp(max_power_left-max_power_mid)+integral_mid)+max_power_mid
if integral_left == 0 and integral_right != 0:
print "TFrm=%.2E \r"%(max_power_right-max_power_mid),
return np.log(integral_right*np.exp(max_power_right-max_power_mid)+integral_mid)+max_power_mid
print "FFlm=%.2E FFrm=%.2E \r"%(max_power_left-max_power_mid,max_power_right-max_power_mid),
return np.log(integral_left*np.exp(max_power_left-max_power_mid)+integral_right*np.exp(max_power_right-max_power_mid)+integral_mid)+max_power_mid
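# Editorial note (not part of the original script): the max_power bookkeeping in
# ID2 above is a log-sum-exp stabilization.  Each sub-integral is evaluated with
# its largest exponent factored out,
#   log(sum_j exp(p_j)) = m + log(sum_j exp(p_j - m)),   with m = max_j p_j,
# so np.exp never under/overflows before the logarithm is taken; the factored-out
# maxima are added back in the return statements.  ID2star further below applies
# the same scheme to the fbarD-only exponent.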
def onlyPower(n,x,i):
return (-RG.VD(i)/k_B/temp*(fbarD(temp,n,x,i) + ubarD(temp,n,x,i)))
##integrandIDlistn = []
##integrandIDlistx = []
##integrandIDlistarg = []
def integrand_ID2(n,maxpower,x):
argument = np.exp(-maxpower-RG.VD(fn)/k_B/temp*(fbarD(temp,n,x,fn) + ubarD(temp,n,x,fn)))
## integrandIDlistn.append(n)
## integrandIDlistx.append(x)
## integrandIDlistarg.append(argument)
return argument
def fbarD(T,n,x,i):
iplusx = f01_ext(n*(1+x))
iminusx = f01_ext(n*(1-x))
nochangex = f01_ext(n)
return (iplusx + iminusx)/2 - nochangex
# Average scaled potential
# eqn (11), Forte 2011
def ubarD(T,n,x,i):
return (RG.u(temp,n*(1+x),0,i) + RG.u(temp,n*(1-x),0,i))/2 - RG.u(temp,n,0,i)
def f01_ext(numdensity):
if numdensity > 0.0008 and numdensity < max_fillingfraction_handled/sphere_volume:
return f01interp(numdensity)
return RG.fiterative(temp,numdensity,0)
def onlyPowerStar(n,x,i):
return (-RG.VD(i)/k_B/temp*fbarD(temp,n,x,i))
def ID2star(n, maxx):
maxpower = -1e99
dx = maxx/num_init
xpoints = np.arange(dx/2,maxx, dx)
kmax=0
for k in range(len(xpoints)):
if maxpower<onlyPowerStar(n,xpoints[k],fn):
maxpower = onlyPowerStar(n,xpoints[k],fn)
kmax=k
#########FIND LEFT SIDE
x_left = xpoints[kmax]
k_left = kmax
while k_left>0 and integrand_ID2star(n,maxpower,x_left)>0.1:
k_left -= 1
x_left = xpoints[k_left]
#########FIND RIGHT SIDE
x_right = xpoints[kmax]
k_right = kmax
while k_right<len(xpoints)-1 and integrand_ID2star(n,maxpower,x_right)>0.1:
k_right += 1
x_right = xpoints[k_right]
integral_left = 0
dx_left = x_left/num_left
xpoints_left = np.arange(dx_left/2,x_left,dx_left)
max_power_left = -1e99
for k in range(len(xpoints_left)):
max_power_left = max(max_power_left,onlyPowerStar(n,xpoints_left[k],fn))
integral_right = 0
dx_right = (maxx-x_right)/num_right
xpoints_right = np.arange(x_right+dx_right/2,maxx,dx_right)
max_power_right = -1e99
for k in range(len(xpoints_right)):
max_power_right = max(max_power_right,onlyPowerStar(n,xpoints_right[k],fn))
integral_mid = 0
dx_mid = (x_right-x_left)/num_mid
xpoints_mid = np.arange(x_left+dx_mid/2,x_right,dx_mid)
max_power_mid = -1e99
for k in range(len(xpoints_mid)):
max_power_mid = max(max_power_mid,onlyPowerStar(n,xpoints_mid[k],fn))
if max_power_left > max_power_mid:
x_left = 0
integral_left = 0
else:
integral_left += integrate.midpoint(lambda x: integrand_ID2star(n,max_power_left,x),0,x_left,num_left)*n
if max_power_right > max_power_mid:
x_right = maxx
integral_right = 0
else:
integral_right += integrate.midpoint(lambda x: integrand_ID2star(n,max_power_right,x),x_right,maxx,num_right)*n
if x_left != 0 and x_right != maxx:
integral_mid += integrate.midpoint(lambda x: integrand_ID2star(n,max_power_mid,x),x_left,x_right,num_mid)*n
else:
dx_mid = (x_right-x_left)/num_mid
xpoints_mid = np.arange(x_left+dx_mid/2,x_right,dx_mid)
max_power_mid = -1e99
for k in range(len(xpoints_mid)):
max_power_mid = max(max_power_mid,onlyPowerStar(n,xpoints_mid[k],fn))
integral_mid += integrate.midpoint(lambda x: integrand_ID2star(n,max_power_mid,x),x_left,x_right,num_mid)*n
if integral_left == 0 and integral_right == 0:
#print "TTm=%.2E \r"%(max_power_mid),
return np.log(integral_mid)+max_power_mid
if integral_left != 0 and integral_right == 0:
#print "FTlm=%.2E \r"%(max_power_left-max_power_mid),
return np.log(integral_left*np.exp(max_power_left-max_power_mid)+integral_mid)+max_power_mid
if integral_left == 0 and integral_right != 0:
#print "TFrm=%.2E \r"%(max_power_right-max_power_mid),
return np.log(integral_right*np.exp(max_power_right-max_power_mid)+integral_mid)+max_power_mid
#print "FFlm=%.2E FFrm=%.2E \r"%(max_power_left-max_power_mid,max_power_right-max_power_mid),
return np.log(integral_left*np.exp(max_power_left-max_power_mid)+integral_right*np.exp(max_power_right-max_power_mid)+integral_mid)+max_power_mid
def integrand_ID2star(n,maxpower,x):
argument = np.exp(-maxpower-RG.VD(fn)/k_B/temp*fbarD(temp,n,x,fn))
return argument
if fn == 1:
firstPass()
print('First pass done')
exit(0)
laterPass()
##plt.figure()
##plt.title('integrand vs x')
##plt.ylabel('integrand')
##plt.xlabel('x')
##for i in range(len(integrand_xs)):
## plt.plot(integrand_xs[i], integrand_args[i])
##
##plt.show()
# #
# #
######################################## END PLOTTING STUFF ###################################
| gpl-2.0 |
ajdawson/eof2 | examples/cdms2/sst_example.py | 1 | 2059 | """
Compute and plot the leading EOF of sea surface temperature in the
central and northern Pacific during winter time.
The spatial pattern of this EOF is the canonical El Nino pattern, and
the associated time series shows large peaks and troughs for well-known
El Nino and La Nina events.
This example uses the metadata-retaining cdms2 interface.
"""
import cdms2
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
from eof2 import Eof
# Read SST anomalies using the cdms2 module from CDAT. The file contains
# November-March averages of SST anomaly in the central and northern Pacific.
ncin = cdms2.open('../../example_data/sst_ndjfm_anom.nc')
sst = ncin('sst')
ncin.close()
# Create an EOF solver to do the EOF analysis. Square-root of cosine of
# latitude weights are applied before the computation of EOFs.
solver = Eof(sst, weights='coslat')
# Retrieve the leading EOF, expressed as the correlation between the leading
# PC time series and the input SST anomalies at each grid point, and the
# leading PC time series itself.
eof1 = solver.eofsAsCorrelation(neofs=1)
pc1 = solver.pcs(npcs=1, pcscaling=1)
# Plot the leading EOF expressed as correlation in the Pacific domain.
m = Basemap(projection='cyl', llcrnrlon=120, llcrnrlat=-20,
urcrnrlon=260, urcrnrlat=60)
lons, lats = eof1.getLongitude()[:], eof1.getLatitude()[:]
x, y = m(*np.meshgrid(lons, lats))
clevs = np.linspace(-1, 1, 11)
m.contourf(x, y, eof1(squeeze=True), clevs, cmap=plt.cm.RdBu_r)
m.drawcoastlines()
m.drawparallels([-20, 0, 20, 40, 60])
m.drawmeridians([120, 140, 160, 180, 200, 220, 240, 260])
cb = plt.colorbar(orientation='horizontal')
cb.set_label('correlation coefficient', fontsize=12)
plt.title('EOF1 expressed as correlation', fontsize=16)
# Plot the leading PC time series.
plt.figure()
years = range(1962, 2012)
plt.plot(years, pc1, color='b', linewidth=2)
plt.axhline(0, color='k')
plt.title('PC1 Time Series')
plt.xlabel('Year')
plt.ylabel('Normalized Units')
plt.xlim(1962, 2012)
plt.ylim(-3, 3)
plt.show()
| gpl-3.0 |
mahidharc/drdo.ids | src/parser.py | 1 | 1063 | from numpy import *
import matplotlib.pyplot as plt; plt.rcdefaults()
import matplotlib.pyplot as plt
def parse(filename):
"""
This module reads flag values from the input file, groups the packets into six groups, and tallies the packet count for each group
"""
#Open the file and read the data
flag = [line.strip() for line in open(filename)]
#Initialise the counters
flags = []
packetCount =zeros(6)
for i in range(len(flag)):
flags.append(int(flag[i],0))
flagArray = array([])
type1 = 4
#Classify the packets into groups
for i in flags:
if i & type1:  # '==' binds tighter than '&'; the original 'i&type1 == True' reduced to 'i & 0' and never matched
flagArray = append(flagArray,1)
packetCount[0] += 1
elif i == 2:
flagArray = append(flagArray,2)
packetCount[1] += 1
elif i == 16:
flagArray = append(flagArray,3)
packetCount[2] += 1
elif i == 17:
flagArray = append(flagArray,4)
packetCount[3] += 1
elif i == 24:
flagArray = append(flagArray,5)
packetCount[4] += 1
else:
flagArray = append(flagArray,6)
packetCount[5] += 1
return flagArray
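# Editorial sketch (not part of the original module): a minimal usage example.
# 'flags.txt' is a hypothetical input file with one TCP flag value per line.
if __name__ == '__main__':
    groups = parse('flags.txt')
    print(groups)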
| mit |
johnchase/q2d2 | q2d2/wui.py | 1 | 7817 | from IPython.html import widgets
from IPython.display import clear_output
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#This is the function that creates the interface
def metadata_controls(df, callback, extras=None):
def filter_category(filters):
newdf = df
for filter_ in filters.children:
key = filter_.children[0].value
value = filter_.children[1].value
newdf = newdf[newdf[key].isin(value)]
return newdf
def create_updater(group):
def group_updater(_, value):
group.options = [str(e) for e in df[value].dropna().unique().tolist()]
return group_updater
def create_category(filters):
def category_widget():
cat = widgets.Dropdown(options=df.columns.tolist())
group = widgets.SelectMultiple(width=130)
group_updater = create_updater(group)
cat.on_trait_change(group_updater, 'value')
group.on_displayed(lambda *args, **kwargs: group_updater(None, cat.value))  # group_updater expects two arguments
category = widgets.VBox(children=[cat, group])
filters.children += (category,)
return category_widget
def remove_category(filters):
def remove_cat_widget():
remove = filters.children[-1]
filters.children = filters.children[:-1]
remove.close()
return remove_cat_widget
def onclick(category, filters):
extra_values = []
if extras is not None:
extra_values = [w.value for w in extras.children]
clear_output()
callback(category, filter_category(filters), *extra_values)
filter_add = widgets.Button(description="Add Filter")
filter_rem = widgets.Button(description="Remove Filter")
go = widgets.Button(description="Go!")
plt_cat = widgets.Dropdown(options=df.columns.tolist(), description='Plot')
menu = widgets.HBox(children=[plt_cat, go, filter_add, filter_rem])
filters = widgets.HBox(children=[])
filter_add.on_click(lambda _: create_category(filters)())
filter_rem.on_click(lambda _: remove_category(filters)())
go.on_click(lambda _: onclick(plt_cat.value, filters))
children = [menu, filters]
if extras is not None:
children.append(extras)
return widgets.VBox(children=children)
#These are functions necessary for the alpha diversity plots
def get_df_intersection(df1, df2):
intersect_ids = set.intersection(set(df1.index.tolist()), set(df2.index.tolist()))
df1 = df1.loc[intersect_ids, ]
df2 = df2.loc[intersect_ids, ]
return df1, df2
def merge_metadata_alpha_div(metadata, alpha_div):
metadata, alpha_div = get_df_intersection(metadata, alpha_div)
#return pd.concat([metadata, alpha_div], axis=1)
metadata['Alpha diversity'] = alpha_div
return metadata
def plot_alpha(metadata, category, hue, metric):
import seaborn as sns
with plt.rc_context(dict(sns.axes_style("darkgrid"),
**sns.plotting_context("notebook", font_scale=2))):
width = len(metadata[category].unique())
plt.figure(figsize=(width*4, 8))
fig = sns.boxplot(x=category, y='Alpha diversity',
data=metadata.sort(category), hue=hue, palette='cubehelix')
fig.set_title(metric)
def plot_alpha_diversity(metadata, alpha_div, category, hue=None, metric=None):
metadata_alpha_div = merge_metadata_alpha_div(metadata, alpha_div)
plot_alpha(metadata_alpha_div, category, hue, metric)
def interactive_plot_alpha_diversity(metadata, alpha_divs):
def on_update(category, metadata, Hue, check, metric):
alpha_diversity = alpha_divs[metric]
if not check:
Hue = None
plot_alpha_diversity(metadata, alpha_diversity, category, Hue, metric)
check = widgets.Checkbox(description='Plot Hue', value=True)
plt_hue = widgets.Dropdown(options=metadata.columns.tolist(), description='Hue')
metric_but = widgets.Dropdown(options=list(alpha_divs.keys()), description='Metrics')
extras = widgets.HBox(children=[plt_hue, check, metric_but])
return metadata_controls(metadata, on_update, extras)
###########Functions for the taxa sumamry plots#####################
def get_taxa_counts(metadata_df, otu_df, category):
cols = metadata_df[category].dropna().unique()
indices = otu_df.index.tolist()
taxa_counts_df = pd.DataFrame(index=indices, columns=cols)
for col in cols:
id_s = metadata_df[metadata_df[category] == col].index
otu_sums = otu_df[id_s].sum(axis=1)
taxa_counts_df.loc[otu_sums.index, col] = otu_sums
return taxa_counts_df
def normalize(df):
for col in df.columns:
normalized_col = df[col]/df[col].sum()
df[col] = normalized_col
return df
#plotting functions
def plot_stacked_bar(df):
df = df.sort(axis=1)
import seaborn as sns
with plt.rc_context(dict(sns.axes_style("darkgrid"),
**sns.plotting_context("notebook", font_scale=1.8))):
f, ax = plt.subplots(1, figsize=(10, 10))
x = list(range(len(df.columns)))
bottom = np.array([0] * len(df.columns))
cat_percents = []
for id_ in df.index:
color = '#' + ''.join(np.random.choice(list('ABCDEF123456789'), 6))
ax.bar(x, df.loc[id_], color=color, bottom=bottom, align='center')
bottom = df.loc[id_] + bottom
cat_percents.append(''.join(["[{0:.2f}] ".format(x) for x in df.loc[id_].tolist()]))
legend_labels = [' '.join(e) for e in zip(cat_percents, df.index.tolist())]
ax.set_xticks(x)
ax.set_xticklabels(df.columns.tolist())
ax.set_ylim([0, 1])
ax.legend(legend_labels, loc='center left', bbox_to_anchor=(1, 0.5))
def filter_by_dic(df, dic):
for key, value in dic.items():
df = df[df[key].isin(value)]
return df
def plot_taxa_summary(otu_table, metadata, taxonomy, category, level='Phylum', min_percent=1):
otu_counts = get_taxa_counts(metadata, otu_table, category)
normalized_otus = normalize(otu_counts)
normalized_otus = normalized_otus[normalized_otus.sum(axis=1) >= min_percent/100]
normalized_otus = normalize(normalized_otus)
normalized_taxa = pd.concat([normalized_otus, taxonomy], axis=1)
normalized_taxa = normalized_taxa.groupby(level).sum()
normalized_taxa = normalized_taxa.dropna()
plot_stacked_bar(normalized_taxa)
def interactive_plot_taxa_summary(metadata, otu_df, taxa_df):
shared_sample_ids = set.intersection(set(metadata.index.tolist()), set(otu_df.columns.tolist()))
shared_otu_ids = set.intersection(set(taxa_df.index.tolist()), set(otu_df.index.tolist()))
if len(shared_otu_ids) == 0:
raise ValueError("There are no OTU ids in common between your OTU table and your "
"OTU metadata file.")
metadata = metadata.loc[shared_sample_ids, ]
otu_df = otu_df[list(shared_sample_ids)]
for index, level in enumerate(['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus']):
taxa_df[level] = taxa_df['taxonomy'].apply(lambda x: ' '.join(x.split(' ')[:index + 1]))
def on_update(category, metadata, level, min_percent):
plot_taxa_summary(otu_df, metadata, taxa_df, category, level, min_percent=min_percent)
plt_level = widgets.Dropdown(options=['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'],
description='Level')
min_percent = widgets.BoundedFloatText(width=60,
value=1,
min=0.0,
max=100.0,
description='Min Percent:')
extras = widgets.HBox(children=[plt_level, min_percent])
return metadata_controls(metadata, on_update, extras) | bsd-3-clause |
dsquareindia/scikit-learn | sklearn/mixture/gmm.py | 19 | 32365 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
@deprecated("The function sample_gaussian is deprecated in 0.18"
" and will be removed in 0.20."
" Use numpy.random.multivariate_normal instead.")
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array
Randomly generated sample. The shape depends on `n_samples`:
(n_features,) if `1`
(n_features, n_samples) otherwise
"""
return _sample_gaussian(mean, covar, covariance_type=covariance_type,
n_samples=n_samples, random_state=random_state)
def _sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
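# lpr[i, k] = log(w_k) + log N(x_i | mu_k, Sigma_k).  logsumexp over the
# components gives the per-sample log-likelihood, and exponentiating the
# difference yields the normalized posterior responsibilities.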
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = _sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic : float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic : float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated in 0.18 and will be "
" removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
"""
Legacy Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.GaussianMixture` instead.
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
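# Quadratic form expanded as x^2 - 2*x*mu + mu^2 (each term divided by the
# diagonal variances) so the per-sample, per-component log-density is obtained
# from matrix products instead of an explicit loop.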
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The functon distribute_covar_matrix_to_match_covariance_type"
"is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
yuealves/machine-learning-toolkits | similarity-based method/rbf.py | 1 | 2024 | # -*- coding: utf-8 -*-
# Created in: Tue Dec 8 15:16:18 2015
# implementation of radial basis function regression (both nonparametric rbf
# and parametric rbf)
__author__ = "Linwei Li"
import numpy as np
import matplotlib.pyplot as plt
def gauss_kernel(r):
return np.exp(-(r**2)/2)
def non_param_rbf(X_train, Y_train, kernel=gauss_kernel, radius=1):
def f(x):
alpha = (X_train - x)
alpha = np.array([np.linalg.norm(x) for x in alpha]) / radius
alpha = kernel(alpha)
alpha /= alpha.sum()
return Y_train.dot(alpha)
return f
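# Editorial note (not part of the original file): non_param_rbf above is a
# Nadaraya-Watson style estimator; each prediction is a kernel-weighted average
# of the training targets, with the weights normalized to sum to one.
# param_rbf below instead fits explicit weights w to the kernel features by
# least squares (via the pseudo-inverse) and predicts with w.dot(z(x)).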
def param_rbf(X_train, Y_train, kernel=gauss_kernel, radius=1):
def transform2z(x):
"""transform features: x(d,1) to z(n,1)"""
z = (X_train - x)
z = np.array([np.linalg.norm(x) for x in z]) / radius
z = kernel(z)
return z
def f(x):
Z = np.array([transform2z(x) for x in X_train])
w = np.linalg.pinv(Z).dot(Y_train)
return w.dot(transform2z(x))
return f
if __name__ == '__main__':
X_train = np.array([2,3,4,5])
Y_train = np.array([6,10,8,5])
r_range = (0.1, 0.4, 1)
f_non_param_rbf = [non_param_rbf(X_train, Y_train, radius=r) for r in r_range]
f_param_rbf = [param_rbf(X_train, Y_train, radius=r) for r in r_range]
X = np.arange(10, step=0.05)
Y_non_param_rbf = np.array([np.array([f(x) for x in X]) for f in f_non_param_rbf])
Y_param_rbf = np.array([np.array([f(x) for x in X]) for f in f_param_rbf])
fig, axs = plt.subplots(2, 3, sharey=True)
for i, r in enumerate(r_range):
axs[0][i].set_title('r = %.2f' % r)
axs[0][i].plot(X, Y_non_param_rbf[i])
axs[0][i].scatter(X_train, Y_train)
# rightPos = axs[0][1].get_position()
# fig.text(rightPos.xmax, (rightPos.ymin+rightPos.ymax)/2, 'nonparametric rbf')
for i, r in enumerate(r_range):
axs[1][i].set_title('r = %.2f' % r)
axs[1][i].plot(X, Y_param_rbf[i])
axs[1][i].scatter(X_train, Y_train)
plt.show()
| mit |
olologin/scikit-learn | sklearn/feature_extraction/hashing.py | 74 | 6153 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
JohannesUIBK/oggm | deployment/fabfile.py | 2 | 41370 |
#---------written by Felix Oesterle (FSO)-----------------
#-DESCRIPTION:
# This is based on fabfile from Raincloud Project (simplified)
#
#-Last modified: Thu Jul 09, 2015 13:10
#@author Felix Oesterle
#-----------------------------------------------------------
from __future__ import with_statement, print_function
from fabric.api import *
import boto.ec2
from boto.vpc import VPCConnection
from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping
import os
import time
import sys
import socket
import datetime
import math
from collections import defaultdict
#-----------------------------------------------------------
# SHORT DOCU
#-----------------------------------------------------------
# -------- SETUP BOTO and Fabric-----------------
# Virtualenv/Generic pip:
# pip install boto fabric
#
# Conda:
# conda install boto fabric
#
# Debian/Ubuntu:
# apt-get install fabric python-boto
#
# install all other missing modules from the list above (start e.g. ipython and copy all imports above and see what's missing;
# all modules should be available via pip or easy_install)
#
# Create credentials file: ~/.boto and fill it with the info given by admin (most likely Ben)
# (replace XXXX with what you want to use in fabfile)
#
# [profile XXXX]
# aws_access_key_id = YOUR Access Key ID HERE
# aws_secret_access_key = YOUR Secret Access Key HERE
#
# If you don't want to be prompted to accept ssh keys with every new instance, place these lines into the ~/.ssh/config file:
#
# Host *amazonaws.com
# User root
# StrictHostKeyChecking no
# UserKnownHostsFile /dev/null
#
#
# ------------RUNNING-------------
# look at fabfile.py
#
# to list all possible task of fabfile:
# fab -l
#
# A few first steps:
# 1. Go through setup below and adjust at least: ec2Profile, def_logfile
# 2. Create instance with
# fab cloud_make
# If you are using spot instances and require your instances to be in the same region
# fab instance_start
# This will use the region configured in def_default_avz.
# 3. Takes between 5 - 10 minutes (if still using spot as def_default_requesttype)
# 4. Use
# fab install_node_software
# to setup a virtualenv ready for OGGM.
#
# If you already setup a virtualenv on your user volume
# fab install_node_apt
# to install only required system components.
# 5. Use
# fab connect
# to ssh into instance
# 6. play around with the instance, install software etc
# 7. look at current costs with
# fab calc_approx_costs_running
# or list all instances with
# fab cloud_list
# 8. Once you have enough, shut down your instance via
# fab terminate_one
# Or terminate all running instances if you are sure they all belong to you
# fab cloud_terminate
# you can also delete volumes with:
# fab terminate_perm_user_vol:name='your_volume_name'
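#
#    Tasks also accept keyword arguments via fabric's task:arg=value syntax,
#    for example (hypothetical names/values, adjust to your setup):
#       fab instance_start:cn=mytest,avz=eu-west-1a,rt=ondemand
#       fab connect:nn=0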
#-----------------------------------------------------------
# SETUP
#-----------------------------------------------------------
env.disable_known_hosts=True
env.user = 'ubuntu'
# FSO--- default name used in tags and instance names:
# set this eg. to your name
def_cn = 'AWS'
# Change to a string identifying yourself
user_identifier = None
# FSO--- ssh and credentials setup
# FSO---the name of the amazon keypair (will be created if it does not exist)
keyn=(user_identifier or 'None') + '_oggm'
# FSO--- the same name as you used in boto setup XXXX (see Readme)
ec2Profile = 'OGGM'
def_key_dir=os.path.expanduser('~/.ssh')
# FSO--- Amazon AWS region setup
def_regions = ['us-east-1','eu-west-1'] #regions for spot search
def_default_avz = 'eu-west-1a' #Default availability zone if ondemand is used
# FSO--- type of instance pricing, either:
# ondemand: faster availability, more expensive
# spot: cheaper, takes longer to start up, might be shutdown without warning
def_default_requesttype = 'spot'
# def_default_requesttype = 'ondemand'
# FSO--- the AMI to use
def_ami = dict()
def_ami['eu-west-1'] = 'ami-c32610a5' #eu Ubuntu 16.04 LTS oggm-base
def_ami['us-east-1'] = 'ami-9f3e9689' #us Ubuntu 16.04 LTS oggm-base
# Subnet to use per AVZ, expects a tuple (vpc-id, subnet-id)
def_subnet = dict()
def_subnet['eu-west-1a'] = ('vpc-61f04204', 'subnet-306ff847')
def_subnet['eu-west-1b'] = ('vpc-61f04204', 'subnet-6ad17933')
def_subnet['us-west-1c'] = ('vpc-61f04204', 'subnet-2e2f414b')
# Size of the rootfs of created instances
rootfs_size_gb = 50
# Name and size of the persistent /work file system
home_volume_ebs_name = "ebs_" + (user_identifier or 'None') # Set to None to disable home volume
new_homefs_size_gb = 50 # GiB, only applies to newly created volumes
# FSO---log file with timestamps to analyse cloud performance
# look at it with tail -f cloudexecution.log
def_logfile = os.path.expanduser('~/cloudexecution.log')
# Default instance type, index into instance_infos array below
def_inst_type = 1
#-----------------------------------------------------------
# SETUP END
#-----------------------------------------------------------
fabfile_dir = os.path.dirname(os.path.abspath(__file__))
if user_identifier is None:
raise RuntimeError('user identifier must be set')
instance_infos = [
{
'type': 't2.micro',
'vcpus': 1,
'price': 0.014,
},
{
'type': 'm4.xlarge',
'vcpus': 4,
'price': 0.264,
},
{
'type': 'c4.2xlarge',
'vcpus': 8,
'price': 0.477,
},
{
'type': 'c4.8xlarge',
'vcpus': 36,
'price': 1.906,
},
]
def_price = instance_infos[def_inst_type]['price']
def update_key_filename(region):
key_name = get_keypair_name(region)
key_dir = os.path.expanduser(def_key_dir)
key_dir = os.path.expandvars(key_dir)
env.key_filename = os.path.join(key_dir, key_name + '.pem')
print('Current key filename: %s' % env.key_filename)
def find_inst_info(inst_type):
for info in instance_infos:
if info['type'] == inst_type:
return info
return None
@task
def cloud_make(cn=def_cn):
"""
Start and prepare instance -THIS IS THE MAIN ACTIVITY-
"""
# t = time.time()
log_with_ts("fabric started ------------------------------")
log_with_ts("Instance: " + instance_infos[def_inst_type]['type'] + "(" + str(instance_infos[def_inst_type]['vcpus']) + " CPUs)")
# FSO---set best avz
if def_default_requesttype == 'spot':
best_avz,request_type = get_cheapest_availability_zone(def_price)
else:
best_avz = def_default_avz
request_type = 'ondemand'
print(best_avz, request_type)
log_with_ts('avz: ' + best_avz)
log_with_ts('request_type: ' + request_type)
# FSO--- start instances
instance_start(cn=cn,avz=best_avz,rt=request_type)
print("Done setting up instance")
log_with_ts("instance ready")
# t_init = time.time()
# # FSO--- run workflow and get cost of nodes back
# this is an example, adjust as needed
# tf = run_workflow(cn=cn,avz=best_avz)
# # FSO--- get costs of and log
    # costs = calc_approx_costs_running(cn=cn)
# log_with_ts('Ondemand costs: '+str(costs['ondemand'])+'USD')
# log_with_ts('Actual costs: '+str(costs['running'])+'USD')
# # FSO--- terminate instances
# uncomment if you want to terminate your instances automatically
# cloud_terminate(cn=cn)
# log_with_ts("all instances terminated")
# t_end = time.time()
# print "Time needed for init (min)", (t_init - t)/60.
# print "Time needed for workflow and terminate", (t_end - t_init)/60.
# log_with_ts("fabric end")
@task
def list_ubuntu_amis(regions=def_regions):
"""
    List all available Ubuntu 16.04 AMIs in all configured regions
"""
for region in regions:
print("Region:", region)
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
imgs = cloud.get_all_images(owners=['099720109477'], filters={'architecture': 'x86_64', 'name': 'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*'})
for img in sorted(imgs, key=lambda v: v.name):
print(img.id,':',img.name)
print()
@task
def instance_start(cn=def_cn,
avz=def_default_avz,
rt=def_default_requesttype):
"""
Start and prepare instances
"""
# FSO---find already existing nodes
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
filters = {'tag:type': cn+'node'}
insta = cloud.get_all_instances(filters=filters)
# FSO---install each new node
print("Requesting new instance")
log_with_ts("Requesting new instance")
nodenumber = len(insta) + 1
node_install(cn=cn, avz=avz, rt=rt, idn=nodenumber)
log_with_ts('Finished installing instance')
cloud_list()
def print_instance(inst):
if inst.state != 'terminated':
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
else:
try:
cu_time = datetime.datetime.strptime(inst.tags.get('terminate_time'),'%Y-%m-%dT%H:%M:%S.%f')
except:
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
time_taken = cu_time - it
hours, rest = divmod(time_taken.total_seconds(),3600)
minutes, seconds = divmod(rest, 60)
print(inst.id, inst.instance_type, \
inst.tags.get('Name'), \
inst.tags.get('type'), \
inst.state, \
inst.dns_name, \
inst.private_ip_address, \
inst.private_dns_name, \
inst.tags.get('current_price'), \
inst.tags.get('billable_hours'), \
inst.tags.get('terminate_time'), \
inst.placement, \
'Subnet:%s' % inst.subnet_id, \
'Owner:%s' % inst.tags.get('node-owner'))
print("running for: ", hours,'h', minutes, "min")
def print_volume(vol):
info = ""
if 'vol-lifetime' in vol.tags:
info += '\tLifetime: ' + vol.tags['vol-lifetime']
if 'vol-user-name' in vol.tags:
info += '\tUservolume Name: ' + vol.tags['vol-user-name']
if 'vol-owner' in vol.tags:
info += '\tOwner: ' + vol.tags['vol-owner']
print(vol.id, "\t", vol.zone, "\t", vol.status, '\t', vol.size, info)
@task
def cloud_list(cn=def_cn,itype='all',regions=def_regions):
"""
List all ec2 instances.
"""
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
vols = cloud.get_all_volumes()
print()
print("-------CURRENT RUNNING-----------")
print(" REGION:", region)
print()
print("Instances:")
print()
update_costs(cn=cn,regions=regions,itype=itype)
for reservation in instances:
for inst in reservation.instances:
print_instance(inst)
print()
print()
print("Volumes:")
print()
for vol in vols:
print_volume(vol)
def check_keypair(cloud, keynames):
# Check to see if specified keypair already exists.
# If we get an InvalidKeyPair.NotFound error back from EC2,
# it means that it doesn't exist and we need to create it.
key_dir = def_key_dir
try:
cloud.get_all_key_pairs(keynames=[keynames])[0]
except cloud.ResponseError as e:
if e.code == 'InvalidKeyPair.NotFound':
print('Creating keypair: %s' % keynames)
# Create an SSH key to use when logging into instances.
key = cloud.create_key_pair(keynames)
# Make sure the specified key_dir actually exists.
# If not, create it.
key_dir = os.path.expanduser(key_dir)
key_dir = os.path.expandvars(key_dir)
# if not os.path.isdir(key_dir):
# os.mkdir(key_dir, 0700)
#
# AWS will store the public key but the private key is
# generated and returned and needs to be stored locally.
# The save method will also chmod the file to protect
# your private key.
key.save(key_dir)
else:
raise
def get_keypair_name(region):
key_dir = def_key_dir
key_dir = os.path.expanduser(key_dir)
key_dir = os.path.expandvars(key_dir)
un_file = os.path.join(key_dir, '%s_unique.txt' % keyn)
if os.path.exists(un_file):
with open(un_file, 'r') as un:
unique_part = un.read().strip()
else:
import uuid
unique_part = str(uuid.uuid4().get_hex().upper()[0:8])
with open(un_file, 'w') as un:
un.write(unique_part)
return keyn + '_' + region + '_' + unique_part
def get_user_persist_ebs(cloud, avz):
if home_volume_ebs_name is None:
return None
vols = cloud.get_all_volumes(filters={'tag:vol-user-name':home_volume_ebs_name, 'availability-zone': avz})
if len(vols) == 0:
print("Creating new EBS volume for user volume %s" % home_volume_ebs_name)
vol = cloud.create_volume(new_homefs_size_gb, avz)
vol.add_tag('vol-user-name', home_volume_ebs_name)
vol.add_tag('vol-lifetime', 'perm')
vol.add_tag('vol-owner', user_identifier)
else:
vol = vols[0]
print("Found existing volume %s for user volume %s!" % (vol.id, home_volume_ebs_name))
if vol.status != 'available':
print("But it's not available...")
return None
return vol
@task
def node_install(cn=def_cn,inst_type_idx=def_inst_type,idn=0,
avz=def_default_avz,rt=def_default_requesttype,
group_name='oggmssh',
ssh_port=22,
cidr='0.0.0.0/0'):
"""
Request and prepare single instance
"""
# FSO---connect
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
aminfo = cloud.get_image(def_ami[avz[:-1]])
vpcconn = VPCConnection(region=cloud.region, profile_name=ec2Profile)
try:
vpc_id, subnet_id = def_subnet[avz]
vpc = vpcconn.get_all_vpcs(vpc_ids=[vpc_id])[0]
except:
vpc_id = None
subnet_id = None
vpc = None
# FSO---check if node with same name already exists
if node_exists(cn + '_node' + str(idn)):
print("Node already exists")
sys.exit()
# Check if ssh keypair exists
key_name = get_keypair_name(avz[:-1])
check_keypair(cloud, key_name)
# FSO---create a bigger root device
dev_sda1 = EBSBlockDeviceType()
dev_sda1.size = rootfs_size_gb
dev_sda1.delete_on_termination = True
bdm = BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
dev_sdf_vol = get_user_persist_ebs(cloud, avz)
# Check to see if specified security group already exists.
# If we get an InvalidGroup.NotFound error back from EC2,
# it means that it doesn't exist and we need to create it.
try:
group = cloud.get_all_security_groups(groupnames=[group_name])[0]
except cloud.ResponseError as e:
if e.code == 'InvalidGroup.NotFound':
print('Creating Security Group: %s' % group_name)
# Create a security group to control access to instance via SSH.
group = cloud.create_security_group(group_name, 'A group that allows SSH access')
else:
raise
# Authorize all Intra-VPC traffic
if vpc is not None:
try:
group.authorize('-1', -1, -1, vpc.cidr_block)
except cloud.ResponseError as e:
if e.code != 'InvalidPermission.Duplicate':
raise
# Add a rule to the security group to authorize SSH traffic
# on the specified port.
try:
group.authorize('tcp', ssh_port, ssh_port, cidr)
except cloud.ResponseError as e:
if e.code == 'InvalidPermission.Duplicate':
print('Security Group: %s already authorized' % group_name)
else:
raise
log_with_ts("request node "+str(idn))
print('Reserving instance for node', aminfo.id, instance_infos[inst_type_idx]['type'], aminfo.name, aminfo.region)
if rt == 'spot':
print("placing node in ",avz)
requests = cloud.request_spot_instances(def_price,
def_ami[avz[:-1]],
count=1,
type='one-time',
security_group_ids=[group.id],
key_name=key_name,
placement=avz,
subnet_id=subnet_id,
ebs_optimized=True,
instance_type=instance_infos[inst_type_idx]['type'],
block_device_map=bdm)
req_ids = [request.id for request in requests]
instance_ids = wait_for_fulfillment(cloud,req_ids)
instances = cloud.get_only_instances(instance_ids=instance_ids)
node = instances[0]
log_with_ts("fullfilled spot node "+str(idn))
else:
print("placing node in ",avz)
reservation = cloud.run_instances(image_id=def_ami[avz[:-1]],
key_name=key_name,
placement=avz,
subnet_id=subnet_id,
security_group_ids=[group.id],
ebs_optimized=True,
instance_type=instance_infos[inst_type_idx]['type'],
block_device_map=bdm)
node = reservation.instances[0]
log_with_ts("fullfilled ondemand node "+str(idn))
time.sleep(2)
while not node.update() == 'running':
print('waiting for', cn, 'node', idn, 'to boot...')
time.sleep(5)
log_with_ts("booted node "+str(idn))
if dev_sdf_vol is not None:
cloud.attach_volume(dev_sdf_vol.id, node.id, "/dev/sdf")
node.add_tag('Name', cn+'_node'+str(idn))
node.add_tag('type', cn+'node')
node.add_tag('node-owner', user_identifier)
# FSO---set delete on termination flag to true for ebs block device
node.modify_attribute('blockDeviceMapping', { '/dev/sda1' : True })
# FSO--- test socket connect to ssh service
ssh_test(node)
log_with_ts("reachable node "+str(idn))
update_key_filename(node.region.name)
# Mount potential user volume
if dev_sdf_vol is not None:
use_user_volume(node.dns_name)
log_with_ts("finished node "+str(idn))
@task
def install_node_software(nn=''):
"""
Setup ready-for-use virtualenv for OGGM on instance
"""
inst = select_instance(nn)
install_node_apt('', inst)
install_node_pip('', inst)
run('echo Rebooting... && sleep 1 && sudo shutdown -r now')
@task
def install_node_pip(nn='', inst=None):
"""
Install oggm dependencies via pip
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
source ~/.virtenvrc &&
workon oggm_env &&
pip install --upgrade pip &&
pip install numpy &&
pip install scipy &&
pip install pandas shapely cython &&
pip install matplotlib &&
pip install gdal==1.11.2 --install-option="build_ext" --install-option="--include-dirs=/usr/include/gdal" &&
pip install fiona --install-option="build_ext" --install-option="--include-dirs=/usr/include/gdal" &&
pip install mpi4py &&
pip install pyproj rasterio Pillow geopandas netcdf4 scikit-image configobj joblib xarray boto3 motionless pytest progressbar2 &&
pip install git+https://github.com/fmaussion/salem.git &&
sed -i 's/^backend.*/backend : Agg/' "${WORKON_HOME}"/oggm_env/lib/python?.?/site-packages/matplotlib/mpl-data/matplotlibrc
""", pty=False)
@task
def install_node_apt(nn='', inst=None):
"""
Install required OGGM apt dependencies
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y update &&
sudo apt-get -y dist-upgrade &&
sudo apt-get -y install build-essential liblapack-dev gfortran libproj-dev gdal-bin libgdal-dev netcdf-bin ncview python3-netcdf4 tk-dev python3-tk python3-dev python3-numpy-dev ttf-bitstream-vera python3-pip git awscli virtualenvwrapper openmpi-bin libopenmpi-dev
""", pty=False)
copy_files = ['~/.aws/credentials', '~/.aws/config', '~/.screenrc', '~/.gitconfig']
for cf in copy_files:
if not os.path.exists(os.path.expanduser(cf)):
continue
run('mkdir -p %s' % os.path.dirname(cf))
put(cf, cf)
run("""
if [ -e /work/ubuntu ]; then
mkdir -p /work/ubuntu/.pyvirtualenvs
echo '# Virtual environment options' > ~/.virtenvrc
echo 'export WORKON_HOME="/work/ubuntu/.pyvirtualenvs"' >> ~/.virtenvrc
echo 'source /usr/share/virtualenvwrapper/virtualenvwrapper_lazy.sh' >> ~/.virtenvrc
else
mkdir -p ~/.pyvirtualenvs
echo '# Virtual environment options' > ~/.virtenvrc
echo 'export WORKON_HOME="${HOME}/.pyvirtualenvs"' >> ~/.virtenvrc
echo 'source /usr/share/virtualenvwrapper/virtualenvwrapper_lazy.sh' >> ~/.virtenvrc
fi
if ! grep virtenvrc ~/.bashrc; then
echo >> ~/.bashrc
echo 'source ~/.virtenvrc' >> ~/.bashrc
fi
""")
# bashrc is not sourced for non-interactive shells, so source the virtenvrc explicitly
run("""
export LC_ALL=C
source ~/.virtenvrc
if ! [ -d ${WORKON_HOME}/oggm_env ]; then
mkvirtualenv oggm_env -p /usr/bin/python3
fi
""")
@task
def install_node_nfs_master(nn='', inst=None):
"""
Setup the node to act as NFS server, serving /home and /work
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y install nfs-kernel-server &&
sudo mkdir -p /work/ubuntu /export/work /export/home &&
sudo chown ubuntu:ubuntu /work/ubuntu &&
echo '/export *(rw,fsid=0,insecure,no_subtree_check,async)' > /tmp/exports &&
echo '/export/work *(rw,nohide,insecure,no_subtree_check,async)' >> /tmp/exports &&
echo '/export/home *(rw,nohide,insecure,no_subtree_check,async)' >> /tmp/exports &&
sudo cp --no-preserve=all /tmp/exports /etc/exports &&
cp /etc/fstab /tmp/fstab &&
echo '/work /export/work none bind 0 0' >> /tmp/fstab &&
echo '/home /export/home none bind 0 0' >> /tmp/fstab &&
sudo cp --no-preserve=all /tmp/fstab /etc/fstab &&
sudo mount /export/work &&
sudo mount /export/home &&
sudo sed -i 's/NEED_SVCGSSD=.*/NEED_SVCGSSD="no"/' /etc/default/nfs-kernel-server &&
sudo service nfs-kernel-server restart &&
echo "%s slots=$(( $(grep '^processor' /proc/cpuinfo | tail -n1 | cut -d ':' -f2 | xargs) + 1 ))" > /work/ubuntu/mpi_hostfile &&
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N "" &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys &&
echo Done
""" % inst.private_ip_address)
@task
def install_node_nfs_client(master_ip, nn='', inst=None):
"""
Setup the node to act as NFS client on the given master_ip.
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
cd / &&
sudo mkdir /work &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y install nfs-common &&
cp /etc/fstab /tmp/fstab &&
echo '%s:/work /work nfs4 _netdev,auto 0 0' >> /tmp/fstab
echo '%s:/home /home nfs4 _netdev,auto 0 0' >> /tmp/fstab
sudo cp --no-preserve=all /tmp/fstab /etc/fstab &&
sudo mount /work &&
echo "%s slots=$(( $(grep '^processor' /proc/cpuinfo | tail -n1 | cut -d ':' -f2 | xargs) + 1 ))" >> /work/ubuntu/mpi_hostfile &&
echo Rebooting... && sleep 1 && sudo shutdown -r now
""" % (master_ip, master_ip, inst.private_ip_address))
@task
def terminate_perm_user_vol(name=home_volume_ebs_name,regions=def_regions):
"""
Terminate the permanent user volume
"""
print(regions)
for region in regions:
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
vols = cloud.get_all_volumes(filters={'tag:vol-user-name':name})
for vol in vols:
if vol.status == 'available':
print(vol.id,"\t", vol.status, "... deleted")
vol.delete()
else:
print(vol.id,"\t", vol.status, "... in use")
@task
def cloud_terminate(cn=def_cn,itype='all',regions=def_regions):
"""
Terminate all instances
"""
print(regions)
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION:",region)
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
instances = cloud.get_all_instances()
vol = cloud.get_all_volumes()
update_costs(cn=cn,itype=itype)
for reservation in instances:
for inst in reservation.instances:
if inst.state != 'terminated':
if itype == 'all':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
elif itype == 'node' and inst.tags.get('type') == cn+'node':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
elif itype == 'master' and inst.tags.get('type') == cn+'master':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
for unattachedvol in vol:
if 'vol-lifetime' in unattachedvol.tags and unattachedvol.tags['vol-lifetime'] == 'perm':
print(unattachedvol.id,"\t", unattachedvol.status, "... is marked permanent")
elif unattachedvol.status == 'available':
print(unattachedvol.id,"\t", unattachedvol.status, "... deleted")
unattachedvol.delete()
else:
print(unattachedvol.id,"\t", unattachedvol.status, "... not deleted")
def select_instance(nn='', regions=def_regions):
"""
Prompt the user to select an instance
"""
instlist = list()
i = 0
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION: ", region)
print()
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
reservations = cloud.get_all_instances()
for reserv in reservations:
for inst in reserv.instances:
if inst.state == 'terminated':
continue
print('Instance %s:' % i)
print_instance(inst)
print()
instlist.append(inst)
i += 1
print()
if nn == '' or nn is None:
nn = prompt('Instance index:')
nn = int(nn)
if nn < 0 or nn >= len(instlist):
print('Instance index out of range!')
sys.exit(-1)
return instlist[nn]
def select_volume(nn='', regions=def_regions):
"""
Prompt the user to select a volume
"""
vollist = list()
i = 0
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION: ", region)
print()
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
vols = cloud.get_all_volumes()
for vol in vols:
print("Volume %s:" % i)
print_volume(vol)
print()
vollist.append(vol)
i += 1
print()
if nn == '' or nn is None:
nn = prompt('Volume index:')
nn = int(nn)
if nn < 0 or nn >= len(vollist):
print('Volume index out of range!')
sys.exit(-1)
return vollist[nn]
@task
def terminate_one(regions=def_regions, nn=''):
"""
Terminate one instance
"""
print('Select instance to terminate:')
print()
inst = select_instance(nn, regions)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
@task
def terminate_volume(regions=def_regions, nn=''):
"""
Terminate one volume
"""
print('Select volume to terminate:')
print()
vol = select_volume(nn, regions)
vol.delete()
@task
def calc_approx_costs_running(cn=def_cn,regions=def_regions,itype ='all'):
"""
calculate compute costs (network or storage not included)
only running instances are considered
From amazon: The instances will be billed at the then-current Spot Price regardless of the actual bid
"""
# FSO---update the price tags for each node
update_costs(cn=cn,regions=regions,itype=itype)
costs = dict()
costs['running'] = 0.0
costs['ondemand'] = 0.0
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
print()
print("----------REGION:",region,itype,'-----------')
for reservation in instances:
for inst in reservation.instances:
if inst.state == 'running' and (inst.tags.get('type')==cn+itype or itype=='all'):
hours = float(inst.tags.get('billable_hours'))
cu_price = float(inst.tags.get('current_price'))
cu_ondemand_price = hours * find_inst_info(inst.instance_type)['price']
print()
print(inst.id, inst.instance_type, \
inst.tags.get('Name'), \
inst.dns_name,\
inst.tags.get('current_price')+'USD', \
inst.tags.get('billable_hours')+'h', \
inst.placement)
# print 'Billable hours ',hours
# print 'Current price', cu_price
# print 'Current ondemand price', cu_ondemand_price
costs['ondemand'] += cu_ondemand_price
if inst.spot_instance_request_id is None:
print('ondemand instance')
                        costs['running'] += cu_ondemand_price
else:
print('spot instance')
costs['running'] += cu_price
print()
print('Total ondemand: ', costs['ondemand'])
print('Total of running: ' , costs['running'])
return costs
@task
def connect(nn='', user='ubuntu'):
"""
SSH to cloud instances
"""
inst = select_instance(nn)
update_key_filename(inst.region.name)
print('ssh', '-i', os.path.expanduser(env.key_filename), '%s@%s' % (user, inst.dns_name))
print('...')
print()
os.execlp('ssh', 'ssh', '-i', os.path.expanduser(env.key_filename), '%s@%s' % (user, inst.dns_name))
def get_cheapest_availability_zone(ondemand_price):
"""
get the cheapest avz and check if below ondemand_price
    BEWARE: does not necessarily get the cheapest avz at the moment, but the one with the lowest maximum price
    in the price history queried below (the last 3 hours). Hopefully that's the most stable price-wise
"""
avz_prices_nodes = defaultdict(list)
for region in def_regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
stati2 = datetime.datetime.utcnow()
stati1 = stati2 - datetime.timedelta(hours=3)
prices = cloud.get_spot_price_history(
# instance_type=def_itype,
# instance_type=['m1.small','m1.medium'],
start_time=stati1.isoformat(),
end_time= stati2.isoformat(),
product_description='Linux/UNIX')
# FSO---split in availability_zones
for price in prices:
if price.instance_type == instance_infos[def_inst_type]['type']:
avz_prices_nodes[str(price.availability_zone)].append(price)
# FSO---remove us-east-1c as access is constrained
try:
del avz_prices_nodes['us-east-1c']
except:
print( "no us-east-1c")
maxprice_nodes = dict()
for key in avz_prices_nodes:
allpr_nodes = [k.price for k in avz_prices_nodes[key]]
maxprice_nodes[key] = max(allpr_nodes)
    best_avz_nodes = min(maxprice_nodes, key=maxprice_nodes.get) # gets just the first if several avz's are the same
print("Cheapest nodes: ", best_avz_nodes, maxprice_nodes[best_avz_nodes])
print("Ondemand nodes (EU):", ondemand_price)
if maxprice_nodes[best_avz_nodes] < ondemand_price:
return best_avz_nodes,'spot'
else:
return def_default_avz,'ondemand'
def wait_for_fulfillment(cloud,pending_ids):
"""
Wait for fulfillment of spot instance requests
"""
instances = list()
while not len(pending_ids) == 0:
pending = cloud.get_all_spot_instance_requests(pending_ids)
for request in pending:
if request.status.code == 'fulfilled':
pending_ids.pop(pending_ids.index(request.id))
print("spot request `{}` fulfilled!".format(request.id))
#print request.__dict__
instances.append(request.instance_id)
cloud.cancel_spot_instance_requests(request.id)
elif request.state == 'cancelled':
pending_ids.pop(pending_ids.index(request.id))
print("spot request `{}` cancelled!".format(request.id))
else:
print("waiting on `{}`".format(request.id))
time.sleep(5)
print("all spots fulfilled!")
return instances
def update_costs(cn=def_cn,itype='all',regions=def_regions):
"""
Updates the price tags of all running instances
"""
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
total_price = 0.0
if inst.state != 'terminated':
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
time_taken = cu_time - it
hours = int(math.ceil(time_taken.total_seconds()/3600.))
# FSO---for spot instances
if inst.spot_instance_request_id is not None:
# FSO---loop through hours. spot instances are billed according to the price at each full hour!
for i in range(hours):
price = cloud.get_spot_price_history(instance_type=inst.instance_type,
start_time = it.isoformat(),
end_time= it.isoformat(),
product_description='Linux/UNIX',
availability_zone=inst.placement)
# print "Hour: ", it, "price=",price
it = it + datetime.timedelta(hours=1)
total_price = total_price + price[0].price
# FSO---ondemand instances
else:
total_price = hours * find_inst_info(inst.instance_type)['price']
inst.add_tag('current_price', total_price)
inst.add_tag('billable_hours', hours)
def log_with_ts(logtext="no text given",lf=def_logfile):
"""
Helper function to write logs with timestamps
"""
# logtime = time.time()
# st = datetime.datetime.fromtimestamp(logtime).strftime('%Y-%m-%d %H:%M:%S')
st = str(datetime.datetime.utcnow())
with open(lf, "a+") as myfile:
myfile.writelines('['+st+' UTC] '+ logtext+'\n')
def spot_price(cloud,launch_time,inst_type):
"""
    Helper function to get the spot price
"""
prices = dict()
#stati = datetime.datetime.utcnow()
#stati = stati - datetime.timedelta(hours=1)
#print stati
# Get prices for instance, AZ and time range
price = cloud.get_spot_price_history(instance_type=inst_type,
# start_time=stati.isoformat(),
start_time = launch_time,
end_time= launch_time,
product_description='Linux/UNIX',
availability_zone='eu-west-1a')
prices['a'] = price[0].price
price = cloud.get_spot_price_history(instance_type=inst_type,
start_time = launch_time,
end_time= launch_time,
product_description='Linux/UNIX',
availability_zone='eu-west-1b')
prices['b'] = price[0].price
price = cloud.get_spot_price_history(instance_type=inst_type,
start_time = launch_time,
end_time= launch_time,
product_description='Linux/UNIX',
availability_zone='eu-west-1c')
prices['c'] = price[0].price
cloudus = boto.ec2.connect_to_region('us-east-1')
price = cloudus.get_spot_price_history(instance_type=inst_type,
start_time = launch_time,
end_time= launch_time,
product_description='Linux/UNIX',
availability_zone='us-east-1c')
if len(price) > 0:
prices['usc'] = price[0].price
else:
prices['usc'] = 0.0
price = cloudus.get_spot_price_history(instance_type=inst_type,
start_time = launch_time,
end_time= launch_time,
product_description='Linux/UNIX',
availability_zone='us-east-1b')
if len(price) > 0:
prices['usb'] = price[0].price
else:
prices['usb'] = 0.0
#for price in price:
#print price.timestamp, price.price
return prices
def node_find(node,avz=def_default_avz):
"""
Return the instance object of a given node hostname.
"""
cloud = boto.ec2.connect_to_region(avz[:-1])
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
if inst.tags.get('Name') == node and inst.state == 'running':
print('found', inst.tags.get('Name'), inst.dns_name)
return inst
def node_exists(node,avz=def_default_avz):
"""
checks if node with given name exists
"""
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
if inst.tags.get('Name') == node and inst.state == 'running':
print('found', inst.tags.get('Name'), inst.dns_name)
return True
return False
def enable_root(host):
"""
Enable root access on instance
"""
env.host_string = host
env.user = 'ubuntu'
run("sudo perl -i -pe 's/disable_root: 1/disable_root: 0/' /etc/cloud/cloud.cfg")
run("sudo perl -i -pe 's/#PermitRootLogin .*/PermitRootLogin without-password/' /etc/ssh/sshd_config")
run('sudo cp -f /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys', shell=True, pty=True)
run("sudo reload ssh")
def use_user_volume(host):
"""
Setup and mount user /work volume
"""
env.host_string = host
env.user = 'ubuntu'
run("test -e /dev/xvdf1 || ( sudo sgdisk -o -g -n 1:2048:0 /dev/xvdf && sudo mkfs.ext4 /dev/xvdf1 )")
run("sudo mkdir /work")
run("sudo mount -o defaults,discard /dev/xvdf1 /work")
run("echo \"/dev/xvdf1 /work ext4 defaults,discard 0 0\" | sudo tee -a /etc/fstab")
run("test -e /work/ubuntu || ( sudo mkdir /work/ubuntu && sudo chown ubuntu:ubuntu /work/ubuntu )")
def ssh_test(inst):
"""
checks for ssh connectability
"""
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect((inst.dns_name, 22))
break
except:
print('waiting for ssh daemon...')
time.sleep(5)
finally:
sock.close()
| gpl-3.0 |
ky822/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 49 | 13124 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
    # default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative values
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
    # Test LDA batch and online training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
    # Test LDA perplexity for batch and online training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
    # Test LDA score for batch and online training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
    # perplexity should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
paulrbrenner/GOS | examples/multiscale-migration/data.py | 2 | 1392 | import numpy as np
import pandas as pd
def file_path(name):
"""
Shortcut function to get the relative path to the directory
which contains the data.
"""
return "./data/%s" % name
def country_codes():
"""
Build country rows from their names, ISO codes, and Numeric
Country Codes.
"""
return (
pd.read_csv(
file_path(
"Country_List_ISO_3166_Codes_Latitude_Longitude.csv"),
usecols=[0, 2, 3],
index_col=1,
keep_default_na=False))
def freedom_index():
"""
Read data from the Freedom Index.
"""
# TODO: Add xlrd to requirements.
xl = pd.ExcelFile(file_path("Freedom_index.xlsx"))
return xl.parse(1)
def ab_values():
"""
Read generated A/B values for each country.
"""
return pd.read_excel(file_path("A&B values for RTS.xlsx")).T
def passport_index():
"""
Read data from the Passport Index.
"""
return pd.read_excel(file_path("PassportIndex.xlsx"))
def un_stock():
"""
Read from "Trends in International Migrant Stock: Migrants by
Destination and Origin"
"""
un_pd = pd.read_excel(
file_path(
"UN_MigrantStockByOriginAndDestination_2015.xlsx"
),
skiprows=15
)
un_np = np.array(un_pd)
ccountry_names = un_np[:,1]
num_codes = un_np[:,3]
return un_pd
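if __name__ == "__main__":
    # Minimal sanity-check sketch (illustrative only, not part of the original
    # module): assumes the spreadsheets referenced above are present in ./data.
    print(country_codes().head())
    print(freedom_index().head())
    print(ab_values().head())
    print(passport_index().head())
    print(un_stock().head())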
| apache-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/dataframe/io/hdf.py | 4 | 15348 | from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from .io import _link
from ...base import get_scheduler
from ..core import DataFrame, new_dd_object
from ... import config, multiprocessing
from ...base import tokenize, compute_as_if_collection
from ...bytes.utils import build_name_function
from ...compatibility import PY3
from ...delayed import Delayed, delayed
from ...utils import get_scheduler_lock
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None,
name_function=None, compute=True, lock=None, dask_kwargs={},
**kwargs):
""" Store Dask Dataframe to Hierarchical Data Format (HDF) files
This is a parallel version of the Pandas function of the same name. Please
see the Pandas docstring for more detailed information about shared keyword
arguments.
This function differs from the Pandas version by saving the many partitions
of a Dask DataFrame in parallel, either to many files, or to many datasets
    within the same file. You may specify this parallelism with an asterisk
    ``*`` within the filename or datapath, and an optional ``name_function``.
    The asterisk will be replaced with an increasing sequence of integers
starting from ``0`` or with the result of calling ``name_function`` on each
of those integers.
This function only supports the Pandas ``'table'`` format, not the more
specialized ``'fixed'`` format.
Parameters
----------
path: string
Path to a target filename. May contain a ``*`` to denote many filenames
key: string
Datapath within the files. May contain a ``*`` to denote many locations
name_function: function
A function to convert the ``*`` in the above options to a string.
Should take in a number from 0 to the number of partitions and return a
string. (see examples below)
compute: bool
Whether or not to execute immediately. If False then this returns a
``dask.Delayed`` value.
lock: Lock, optional
Lock to use to prevent concurrency issues. By default a
``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
will be used depending on your scheduler if a lock is required. See
dask.utils.get_scheduler_lock for more information about lock
selection.
**other:
See pandas.to_hdf for more information
Examples
--------
Save Data to a single file
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Save data to multiple datapaths within the same file:
>>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP
Save data to multiple files:
>>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP
Save data to multiple files, using the multiprocessing scheduler:
>>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP
Specify custom naming scheme. This writes files as
'2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..
>>> from datetime import date, timedelta
>>> base = date(year=2000, month=1, day=1)
>>> def name_function(i):
... ''' Convert integer 0 to n to a string '''
... return base + timedelta(days=i)
>>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP
Returns
-------
None: if compute == True
delayed value: if compute == False
See Also
--------
read_hdf:
to_parquet:
"""
name = 'to-hdf-' + uuid.uuid1().hex
pd_to_hdf = getattr(df._partition_type, 'to_hdf')
single_file = True
single_node = True
# if path is string, format using i_name
if isinstance(path, str):
if path.count('*') + key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in file "
"path and dataset key")
fmt_obj = lambda path, i_name: path.replace('*', i_name)
if '*' in path:
single_file = False
else:
if key.count('*') > 1:
raise ValueError("A maximum of one asterisk is accepted in "
"dataset key")
fmt_obj = lambda path, _: path
if '*' in key:
single_node = False
if 'format' in kwargs and kwargs['format'] not in ['t', 'table']:
raise ValueError("Dask only support 'table' format in hdf files.")
if mode not in ('a', 'w', 'r+'):
raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
if name_function is None:
name_function = build_name_function(df.npartitions - 1)
# we guarantee partition order is preserved when its saved and read
# so we enforce name_function to maintain the order of its input.
if not (single_file and single_node):
formatted_names = [name_function(i) for i in range(df.npartitions)]
if formatted_names != sorted(formatted_names):
warn("To preserve order between partitions name_function "
"must preserve the order of its input")
# If user did not specify scheduler and write is sequential default to the
# sequential scheduler. otherwise let the _get method choose the scheduler
if (get is None and
not config.get('get', None) and
scheduler is None and
not config.get('scheduler', None) and
single_node and single_file):
scheduler = 'single-threaded'
# handle lock default based on whether we're writing to a single entity
_actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler)
if lock is None:
if not single_node:
lock = True
elif not single_file and _actual_get is not multiprocessing.get:
# if we're writing to multiple files with the multiprocessing
# scheduler we don't need to lock
lock = True
else:
lock = False
if lock:
lock = get_scheduler_lock(get, df, scheduler=scheduler)
kwargs.update({'format': 'table', 'mode': mode, 'append': append})
dsk = dict()
i_name = name_function(0)
dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, 0), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs)
kwargs2 = kwargs.copy()
if single_file:
kwargs2['mode'] = 'a'
if single_node:
kwargs2['append'] = True
filenames = []
for i in range(0,df.npartitions):
i_name = name_function(i)
filenames.append(fmt_obj(path, i_name))
for i in range(1, df.npartitions):
i_name = name_function(i)
task = (_pd_to_hdf, pd_to_hdf, lock,
[(df._name, i), fmt_obj(path, i_name),
key.replace('*', i_name)], kwargs2)
if single_file:
link_dep = i - 1 if single_node else 0
task = (_link, (name, link_dep), task)
dsk[(name, i)] = task
dsk = merge(df.dask, dsk)
if single_file and single_node:
keys = [(name, df.npartitions - 1)]
else:
keys = [(name, i) for i in range(df.npartitions)]
if compute:
compute_as_if_collection(DataFrame, dsk, keys, get=get,
scheduler=scheduler, **dask_kwargs)
return filenames
else:
return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be used monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
chunksize=int(1e6), sorted_index=False, lock=None,
mode='a'):
"""
Read a single hdf file into a dask.dataframe. Used for each file in
read_hdf.
"""
def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize):
"""
Get the "keys" or group identifiers which match the given key, which
can contain wildcards. This uses the hdf file identified by the
given path. Also get the index of the last row of data for each matched
key.
"""
with pd.HDFStore(path, mode=mode) as hdf:
keys = [k for k in hdf.keys() if fnmatch(k, key)]
stops = []
divisions = []
for k in keys:
storer = hdf.get_storer(k)
if storer.format_type != 'table':
raise TypeError(dont_use_fixed_error_message)
if stop is None:
stops.append(storer.nrows)
elif stop > storer.nrows:
raise ValueError("Stop keyword exceeds dataset number "
"of rows ({})".format(storer.nrows))
else:
stops.append(stop)
if sorted_index:
division = [storer.read_column('index', start=start, stop=start + 1)[0]
for start in range(0, storer.nrows, chunksize)]
division_end = storer.read_column('index',
start=storer.nrows - 1,
stop=storer.nrows)[0]
division.append(division_end)
divisions.append(division)
else:
divisions.append(None)
return keys, stops, divisions
def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
"""
Get the data frame corresponding to one path and one key (which should
not contain any wildcards).
"""
empty = pd.read_hdf(path, key, mode=mode, stop=0)
if columns is not None:
empty = empty[columns]
token = tokenize((path, os.path.getmtime(path), key, start,
stop, empty, chunksize, division))
name = 'read-hdf-' + token
if empty.ndim == 1:
base = {'name': empty.name, 'mode': mode}
else:
base = {'columns': empty.columns, 'mode': mode}
if start >= stop:
raise ValueError("Start row number ({}) is above or equal to stop "
"row number ({})".format(start, stop))
def update(s):
new = base.copy()
new.update({'start': s, 'stop': s + chunksize})
return new
dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
update(s)))
for i, s in enumerate(range(start, stop, chunksize)))
if division:
divisions = division
else:
divisions = [None] * (len(dsk) + 1)
return new_dd_object(dsk, name, empty, divisions)
keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize)
if (start != 0 or stop is not None) and len(keys) > 1:
raise NotImplementedError(read_hdf_error_msg)
from ..multi import concat
return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
""" Read from hdf5 file with a lock """
if lock:
lock.acquire()
try:
result = pd.read_hdf(path, key, **kwargs)
finally:
if lock:
lock.release()
return result
def read_hdf(pattern, key, start=0, stop=None, columns=None,
chunksize=1000000, sorted_index=False, lock=True, mode='a'):
"""
Read HDF files into a Dask DataFrame
Read hdf files into a dask dataframe. This function is like
``pandas.read_hdf``, except it can read from a single large file, or from
multiple files, or from multiple keys from the same file.
Parameters
----------
pattern : string, list
File pattern (string), buffer to read from, or list of file
paths. Can contain wildcards.
key : group identifier in the store. Can contain wildcards
start : optional, integer (defaults to 0), row number to start at
stop : optional, integer (defaults to None, the last row), row number to
stop at
columns : list of columns, optional
A list of columns that if not None, will limit the return
columns (default is None)
chunksize : positive integer, optional
Maximal number of rows per partition (default is 1000000).
sorted_index : boolean, optional
Option to specify whether or not the input hdf files have a sorted
index (default is False).
lock : boolean, optional
Option to use a lock to prevent concurrency issues (default is True).
mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s).
'r'
Read-only; no data can be modified.
'a'
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
'r+'
It is similar to 'a', but the file must already exist.
Returns
-------
dask.DataFrame
Examples
--------
Load single file
>>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP
Load multiple files
>>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP
>>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP
Load multiple datasets
>>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP
"""
if lock is True:
lock = get_scheduler_lock()
key = key if key.startswith('/') else '/' + key
if isinstance(pattern, str):
paths = sorted(glob(pattern))
else:
paths = pattern
if (start != 0 or stop is not None) and len(paths) > 1:
raise NotImplementedError(read_hdf_error_msg)
if chunksize <= 0:
raise ValueError("Chunksize must be a positive integer")
if (start != 0 or stop is not None) and sorted_index:
raise ValueError("When assuming pre-partitioned data, data must be "
"read in its entirety using the same chunksizes")
from ..multi import concat
return concat([_read_single_hdf(path, key, start=start, stop=stop,
columns=columns, chunksize=chunksize,
sorted_index=sorted_index,
lock=lock, mode=mode)
for path in paths])
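# Illustrative usage sketch (the file pattern and key below are hypothetical,
# not part of this module): read_hdf above is typically called as
#   >>> import dask.dataframe as dd                           # doctest: +SKIP
#   >>> df = dd.read_hdf('data.*.hdf5', '/records',
#   ...                  chunksize=250000, mode='r')          # doctest: +SKIP
# which concatenates one lazily-built set of partitions per matching file.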
if PY3:
from ..core import _Frame
_Frame.to_hdf.__doc__ = to_hdf.__doc__
| gpl-3.0 |
YinongLong/scikit-learn | sklearn/gaussian_process/kernels.py | 4 | 66652 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" flag
is derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes; in particular, it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. We avoid that by
# telling the Python interpreter that this subclass uses static __slots__
# instead of dynamic attributes. Furthermore, we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
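# Illustrative sketch (not part of scikit-learn; the function name is
# hypothetical): constructing a Hyperparameter specification directly to show
# how bounds are broadcast for vector-valued (anisotropic) hyperparameters.
def _demo_hyperparameter_spec():
    # A 3-element length-scale sharing one (lower, upper) bound pair; __new__
    # above repeats the bounds to shape (3, 2) and derives fixed=False.
    hp = Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)
    return hp.bounds.shape, hp.fixed  # ((3, 2), False)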
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr in dir(self):
if attr.startswith("hyperparameter_"):
r.append(getattr(self, attr))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims  # all kernels are assumed to share the same number of dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
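# Illustrative sketch (not library code; the function name is hypothetical):
# WhiteKernel contributes noise only on the diagonal of k(X, X); for a
# cross-evaluation k(X, Y) with Y given it returns zeros, matching __call__ above.
def _demo_white_kernel():
    X = np.arange(4.0).reshape(4, 1)
    k = WhiteKernel(noise_level=0.5)
    return k(X), k(X, X + 1.0)  # 0.5 * identity versus an all-zero 4x4 matrix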
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of length_scale defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
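# Illustrative sketch (not library code; the function name is hypothetical):
# isotropic versus anisotropic RBF. With a vector length_scale each feature
# dimension is rescaled separately before the squared distance is computed.
def _demo_rbf_variants():
    X = np.array([[0.0, 0.0], [1.0, 2.0]])
    iso = RBF(length_scale=1.0)
    aniso = RBF(length_scale=[1.0, 2.0])
    # iso: exp(-0.5 * (1**2 + 2**2)); aniso: exp(-0.5 * ((1/1)**2 + (2/2)**2))
    return iso(X)[0, 1], aniso(X)[0, 1]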
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams (2006), p. 84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of length_scale defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to length_scale, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
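# Illustrative sketch (not library code; the function name is hypothetical):
# the nu=0.5 case of Matern reduces to the absolute exponential kernel exp(-d),
# while nu=1.5 gives the once-differentiable case described in the docstring.
def _demo_matern_special_cases():
    X = np.array([[0.0], [1.5]])
    k_05 = Matern(length_scale=1.0, nu=0.5)(X)[0, 1]  # equals exp(-1.5)
    k_15 = Matern(length_scale=1.0, nu=1.5)(X)[0, 1]
    return k_05, np.exp(-1.5), k_15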
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
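# Illustrative sketch (not library code; the function name is hypothetical):
# for large alpha the RationalQuadratic kernel approaches an RBF kernel with the
# same length_scale, consistent with its scale-mixture interpretation above.
def _demo_rq_limit():
    X = np.array([[0.0], [1.0]])
    rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)[0, 1]
    rbf = RBF(length_scale=1.0)(X)[0, 1]
    return rq, rbf  # both are close to exp(-0.5)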
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 * (sin(pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
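# Illustrative sketch (not library code; the function name is hypothetical):
# ExpSineSquared is periodic, so points separated by exactly one period are
# perfectly correlated, matching the formula in the class docstring.
def _demo_periodicity():
    X = np.array([[0.0], [3.0], [1.2]])
    k = ExpSineSquared(length_scale=1.0, periodicity=3.0)(X)
    return k[0, 1], k[0, 2]  # k[0, 1] is numerically 1.0; k[0, 2] < 1.0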
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, ..., D)
and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
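# Illustrative sketch (not library code; the function name is hypothetical):
# DotProduct depends on the dot product of the inputs rather than on their
# difference, and exponentiating it yields a polynomial kernel.
def _demo_dot_product():
    X = np.array([[1.0, 0.0], [2.0, 2.0]])
    linear = DotProduct(sigma_0=0.0)          # homogeneous linear kernel
    quadratic = DotProduct(sigma_0=1.0) ** 2  # (sigma_0**2 + x.y)**2
    return linear(X), quadratic(X)[0, 1]      # quadratic(X)[0, 1] == (1 + 2)**2 == 9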
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
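# Illustrative sketch (not library code; the function name is hypothetical):
# cross-checking the analytic RBF gradient against the finite-difference helper
# _approx_fprime defined above; the difference should be of the order of the
# finite-difference error.
def _demo_gradient_check():
    X = np.random.RandomState(0).rand(5, 2)
    kernel = RBF(length_scale=1.5)
    K, K_gradient = kernel(X, eval_gradient=True)
    K_gradient_num = _approx_fprime(
        kernel.theta, lambda theta: kernel.clone_with_theta(theta)(X), 1e-10)
    return np.max(np.abs(K_gradient - K_gradient_num))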
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
| bsd-3-clause |
rueckstiess/mtools | mtools/mplotqueries/plottypes/histogram_type.py | 1 | 5919 | import argparse
import numpy as np
from mtools.mplotqueries.plottypes.base_type import BasePlotType
from mtools.util.log2code import Log2CodeConverter
try:
from matplotlib.dates import date2num, num2date
except ImportError as error:
raise ImportError("Can't import matplotlib. See "
"https://matplotlib.org/users/installing.html "
"for instructions on how to install matplotlib."
"Error: " + str(error))
class HistogramPlotType(BasePlotType):
"""
Plot a histogram plot over all logevents.
The bucket size can be specified with the --bucketsize or -b parameter.
Unit is in seconds.
"""
plot_type_str = 'histogram'
timeunits = {'sec': 1, 's': 1, 'min': 60, 'm': 60, 'hour': 3600, 'h': 3600,
'day': 86400, 'd': 86400}
sort_order = 1
default_group_by = 'namespace'
l2cc = Log2CodeConverter()
def __init__(self, args=None, unknown_args=None):
BasePlotType.__init__(self, args, unknown_args)
# parse arguments further to get --bucketsize argument
argparser = argparse.ArgumentParser("mplotqueries --type histogram")
argparser.add_argument('--bucketsize', '-b', action='store',
metavar='SIZE',
help="histogram bucket size in seconds",
default=60)
argparser.add_argument('--no-stacked', action='store_true',
help=("switch graph mode from stacked "
"histogram (default) to side-by-side "
"histograms."), default=False)
sub_args = vars(argparser.parse_args(unknown_args))
self.logscale = args['logscale']
# get bucket size, either as int (seconds) or as string
# (see timeunits above)
bs = sub_args['bucketsize']
try:
self.bucketsize = int(bs)
except ValueError:
self.bucketsize = self.timeunits[bs]
self.barstacked = not sub_args['no_stacked']
self.ylabel = "# lines per %i second bin" % self.bucketsize
def accept_line(self, logevent):
"""
Return True for each line.
We bucket everything. Filtering has to be done before passing to this
type of plot.
"""
return True
def log2code(self, logevent):
codeline = self.l2cc(logevent.line_str)
if codeline:
return ' ... '.join(codeline.pattern)
else:
return None
def plot_group(self, group, idx, axis):
raise NotImplementedError("Not implemented for histogram plots.")
def plot(self, axis, ith_plot, total_plots, limits):
"""
Plot the histogram as a whole over all groups.
Do not plot as individual groups like other plot types.
"""
print(self.plot_type_str.upper() + " plot")
print("%5s %9s %s" % ("id", " #points", "group"))
for idx, group in enumerate(self.groups):
print("%5s %9s %s" % (idx + 1, len(self.groups[group]), group))
print('')
datasets = []
colors = []
minx = np.inf
maxx = -np.inf
for idx, group in enumerate(self.groups):
x = date2num([logevent.datetime
for logevent in self.groups[group]])
minx = min(minx, min(x))
maxx = max(maxx, max(x))
datasets.append(x)
color, marker = self.color_map(group)
colors.append(color)
if total_plots > 1:
# if more than one plot, move histogram to twin axis on the right
twin_axis = axis.twinx()
twin_axis.set_ylabel(self.ylabel)
axis.set_zorder(twin_axis.get_zorder() + 1) # put ax ahead of ax2
axis.patch.set_visible(False) # hide the 'canvas'
axis = twin_axis
n_bins = max(1, int((maxx - minx) * 24. * 60. * 60. / self.bucketsize))
if n_bins > 1000:
# warning for too many buckets
print("warning: %i buckets, will take a while to render. "
"consider increasing --bucketsize." % n_bins)
n, bins, artists = axis.hist(datasets, bins=n_bins, align='mid',
log=self.logscale,
histtype="barstacked"
if self.barstacked else "bar",
color=colors, edgecolor="none",
linewidth=0, alpha=0.8, picker=True,
label=map(str, self.groups.keys()))
# scale current y-axis to match min and max values
axis.set_ylim(np.min(n), np.max(n))
# add meta-data for picking
if len(self.groups) > 1:
for g, group in enumerate(self.groups.keys()):
for i in range(len(artists[g])):
artists[g][i]._mt_plot_type = self
artists[g][i]._mt_group = group
artists[g][i]._mt_n = n[g][i]
if self.barstacked:
artists[g][i]._mt_n -= (n[g - 1][i] if g > 0 else 0)
artists[g][i]._mt_bin = bins[i]
else:
for i in range(len(artists)):
artists[i]._mt_plot_type = self
artists[i]._mt_group = group
artists[i]._mt_n = n[i]
artists[i]._mt_bin = bins[i]
return artists
def clicked(self, event):
"""Print group name and number of items in bin."""
group = event.artist._mt_group
n = event.artist._mt_n
dt = num2date(event.artist._mt_bin)
print("%4i %s events in %s sec beginning at %s"
% (n, group, self.bucketsize, dt.strftime("%b %d %H:%M:%S")))
| apache-2.0 |
fengzhyuan/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
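# Illustrative usage sketch (hypothetical, kept as comments so nothing runs at import):
# Tee duplicates every write() to two file-like objects, which is how example output is
# both echoed to the console and captured for the generated rst.
#
#     buf = StringIO()
#     sys.stdout = Tee(sys.__stdout__, buf)
#     print('hello')              # shows up on the console and in buf.getvalue()
#     sys.stdout = sys.__stdout__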
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
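# Illustrative usage sketch (hypothetical URL, kept as comments):
#
#     sindex = get_data('http://example.org/stable/searchindex.js')
#     filenames, objects = parse_sphinx_searchindex(sindex)
#     # 'filenames' lists the documented pages; 'objects' maps dotted object names
#     # (possibly nested per module) to indices into 'filenames'.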
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
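# Illustrative usage sketch (hypothetical doc_url and cobj values, kept as comments):
#
#     resolver = SphinxDocLinkResolver('http://example.org/stable')
#     cobj = {'name': 'SVC', 'module': 'sklearn.svm.classes', 'module_short': 'sklearn.svm'}
#     link = resolver.resolve(cobj, this_url='auto_examples/plot_example.html')
#     # link would be a page URL ending in '#sklearn.svm.SVC' if found, else None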
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
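# Example: the loop walks the dotted path from the right and keeps the shortest prefix
# from which the object can still be imported, so (assuming the usual scikit-learn
# layout) get_short_module_name('sklearn.svm.classes', 'SVC') returns 'sklearn.svm'.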
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
Parth-Brahmbhatt/kafka | system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
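# Illustrative shape of the metrics definition JSON both readers expect (field names are
# taken from the accessors above; the concrete values are hypothetical):
#
#     {"dashboards": [
#         {"name": "broker dashboard", "role": "broker",
#          "graphs": [{"graph_name": "MessagesIn", "y_label": "count",
#                      "bean_name": "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec",
#                      "attributes": "Count,OneMinuteRate"}]}]}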
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
    # build all dashboards for the input entities based on their role. It can be one of kafka, zookeeper, producer
    # or consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
rsivapr/scikit-learn | sklearn/metrics/cluster/bicluster/tests/test_bicluster_metrics.py | 13 | 1145 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal
from ..bicluster_metrics import _jaccard
from ..bicluster_metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
| bsd-3-clause |
nyirock/mg_blast_wrapper | mg_blast_wrapper_v1.14.3.py | 2 | 26958 | #!/usr/bin/python
import getopt
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
import time  # import time, gmtime, strftime
import os
import shutil
import pandas
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import csv
#from datetime import datetime
import numpy as np
from scipy import stats
__author__ = "Andriy Sheremet"
#Helper functions definitions
def genome_shredder(input_dct, shear_val):
shredded = {}
for key, value in input_dct.items():
#print input_dct[i].seq
#print i
dic_name = key
rec_name = value.name
for j in range(0, len(str(value.seq)), int(shear_val)):
# print j
record = str(value.seq)[0+j:int(shear_val)+j]
shredded[dic_name+"_"+str(j)] = SeqRecord(Seq(record),rec_name+"_"+str(j),'','')
#record = SeqRecord(input_ref_records[i].seq[0+i:int(shear_val)+i],input_ref_records[i].name+"_%i"%i,"","")
return shredded
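# Illustrative usage sketch (hypothetical file name, kept as comments):
#
#     refs = dict(parse_contigs_ind("reference.fasta"))
#     fragments = genome_shredder(refs, 500)   # 500 bp pieces keyed name_0, name_500, ...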
def parse_contigs_ind(f_name):
"""
    Returns sequences index from the input file(s)
remember to close index object after use
"""
handle = open(f_name, "rU")
record_dict = SeqIO.index(f_name,"fasta")
handle.close()
return record_dict
#returning specific sequences and overall list
def retrive_sequence(contig_lst, rec_dic):
"""
Returns list of sequence elements from dictionary/index of SeqIO objects specific to the contig_lst parameter
"""
contig_seqs = list()
#record_dict = rec_dic
#handle.close()
for contig in contig_lst:
contig_seqs.append(str(rec_dic[contig].seq))#fixing BiopythonDeprecationWarning
return contig_seqs
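# Illustrative usage sketch (hypothetical file/read names, kept as comments):
#
#     index = parse_contigs_ind("metagenome.fasta")
#     seqs = retrive_sequence(["read_1", "read_7"], index)   # list of plain sequence strings
#     index.close()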
def filter_seq_dict(key_lst, rec_dic):
"""
Returns filtered dictionary element from rec_dic according to sequence names passed in key_lst
"""
return { key: rec_dic[key] for key in key_lst }
def unique_scaffold_topEval(dataframe):
    #returns a pandas DataFrame object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[11]<scaffolds[row[1]][11]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
def unique_scaffold_topBits(dataframe):
    #returns a pandas DataFrame object
variables = list(dataframe.columns.values)
scaffolds=dict()
rows=list()
for row in dataframe.itertuples():
#if row[1]=='Ga0073928_10002560':
if row[1] not in scaffolds:
scaffolds[row[1]]=row
else:
if row[12]>scaffolds[row[1]][12]:
scaffolds[row[1]]=row
rows=scaffolds.values()
#variables=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
df = pandas.DataFrame([[getattr(i,j) for j in variables] for i in rows], columns = variables)
return df
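# Note: both unique_scaffold_* helpers keep one BLAST hit per query id ('quid');
# topEval keeps the lowest e-value row, topBits the highest bit-score row. Given
# ('read_1', ..., bits=50) and ('read_1', ..., bits=80), topBits keeps only the latter.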
def close_ind_lst(ind_lst):
"""
Closes index objects supplied in input parameter list
"""
for index in ind_lst:
index.close()
def usage():
print "\nThis is the usage function\n"
# print 'Usage: '+sys.argv[0]+' -i <input_file> [-o <output>] [-l <minimum length>]'
# print 'Example: '+sys.argv[0]+' -i input.fasta -o output.fasta -l 100'
def main(argv):
#default parameters
mg_lst = []
ref_lst = []
e_val = 1e-5
alen = 50.0
alen_percent = True
alen_bp = False
iden = 95.0
name= "output"
fmt_lst = ["fasta"]
supported_formats =["fasta", "csv"]
iterations = 1
alen_increment = 5.0
iden_increment = 0.0
blast_db_Dir = ""
results_Dir = ""
input_files_Dir = ""
ref_out_0 = ""
blasted_lst = []
continue_from_previous = False #poorly supported, just keeping the directories
skip_blasting = False
debugging = False
sheared = False
shear_val = None
logfile = ""
try:
opts, args = getopt.getopt(argv, "r:m:n:e:a:i:s:f:h", ["reference=", "metagenome=", "name=", "e_value=", "alignment_length=", "identity=","shear=","format=", "iterations=", "alen_increment=", "iden_increment=","continue_from_previous","skip_blasting","debugging", "help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
# elif opt in ("--recover_after_failure"):
# recover_after_failure = True
# print "Recover after failure:", recover_after_failure
elif opt in ("--continue_from_previous"):
continue_from_previous = True
if debugging:
print "Continue after failure:", continue_from_previous
elif opt in ("--debugging"):
debugging = True
if debugging:
print "Debugging messages:", debugging
elif opt in ("-r", "--reference"):
if arg:
ref_lst=arg.split(',')
#infiles = arg
if debugging:
print "Reference file(s)", ref_lst
elif opt in ("-m", "--metagenome"):
if arg:
mg_lst=arg.split(',')
#infiles = arg
if debugging:
print "Metagenome file(s)", mg_lst
elif opt in ("-f", "--format"):
if arg:
fmt_lst=arg.split(',')
#infiles = arg
if debugging:
print "Output format(s)", fmt_lst
elif opt in ("-n", "--name"):
if arg.strip():
name = arg
if debugging:
print "Project name", name
elif opt in ("-e", "--e_value"):
try:
e_val = float(arg)
except:
print "\nERROR: Please enter numerical value as -e parameter (default: 1e-5)"
usage()
sys.exit(1)
if debugging:
print "E value", e_val
elif opt in ("-a", "--alignment_length"):
if arg.strip()[-1]=="%":
alen_bp = False
alen_percent = True
else:
alen_bp = True
alen_percent = False
try:
alen = float(arg.split("%")[0])
except:
print "\nERROR: Please enter a numerical value as -a parameter (default: 50.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", alen
elif opt in ("-i", "--identity"):
try:
iden = float(arg)
except:
print "\nERROR: Please enter a numerical value as -i parameter (default: 95.0)"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("-s", "--shear"):
sheared = True
try:
shear_val = int(arg)
except:
print "\nERROR: Please enter an integer value as -s parameter"
usage()
sys.exit(1)
if debugging:
print "Alignment length", iden
elif opt in ("--iterations"):
try:
iterations = int(arg)
except:
print "\nWARNING: Please enter integer value as --iterations parameter (using default: 1)"
if debugging:
print "Iterations: ", iterations
elif opt in ("--alen_increment"):
try:
alen_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --alen_increment parameter (using default: )", alen_increment
if debugging:
print "Alignment length increment: ", alen_increment
elif opt in ("--iden_increment"):
try:
iden_increment = float(arg)
except:
print "\nWARNING: Please enter numerical value as --iden_increment parameter (using default: )", iden_increment
if debugging:
print "Alignment length increment: ", iden_increment
elif opt in ("--skip_blasting"):
skip_blasting = True
if debugging:
print "Blasting step omitted; Using previous blast output."
for ref_file in [x for x in ref_lst if x]:
try:
#
with open(ref_file, "rU") as hand_ref:
pass
except:
print "\nERROR: Reference File(s) ["+ref_file+"] doesn't exist"
usage()
sys.exit(1)
for mg_file in [x for x in mg_lst if x]:
try:
#
with open(mg_file, "rU") as hand_mg:
pass
except:
print "\nERROR: Metagenome File(s) ["+mg_file+"] doesn't exist"
usage()
sys.exit(1)
for fmt in [x for x in fmt_lst if x]:
if fmt not in supported_formats:
print "\nWARNING: Output format [",fmt,"] is not supported"
print "\tUse -h(--help) option for the list of supported formats"
fmt_lst=["fasta"]
print "\tUsing default output format: ", fmt_lst[0]
project_dir = name
if not continue_from_previous:
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
try:
os.mkdir(project_dir)
except OSError:
print "ERROR: Cannot create project directory: " + name
raise
print "\n\t Initial Parameters:"
print "\nProject Name: ", name,'\n'
print "Project Directory: ", os.path.abspath(name),'\n'
print "Reference File(s): ", ref_lst,'\n'
if sheared:
print "Shear Reference File(s):", str(shear_val)+"bp",'\n'
print "Metagenome File(s): ", mg_lst,'\n'
print "E Value: ", e_val, "\n"
if alen_percent:
print "Alignment Length: "+str(alen)+'%\n'
if alen_bp:
print "Alignment Length: "+str(alen)+'bp\n'
print "Sequence Identity: "+str(iden)+'%\n'
print "Output Format(s):", fmt_lst,'\n'
if iterations > 1:
print "Iterations: ", iterations, '\n'
print "Alignment Length Increment: ", alen_increment, '\n'
print "Sequence identity Increment: ", iden_increment, '\n'
#Initializing directories
blast_db_Dir = name+"/blast_db"
if not continue_from_previous:
if os.path.exists(blast_db_Dir):
shutil.rmtree(blast_db_Dir)
try:
os.mkdir(blast_db_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + blast_db_Dir
raise
results_Dir = name+"/results"
if not continue_from_previous:
if os.path.exists(results_Dir):
shutil.rmtree(results_Dir)
try:
os.mkdir(results_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + results_Dir
raise
input_files_Dir = name+"/input_files"
if not continue_from_previous:
if os.path.exists(input_files_Dir):
shutil.rmtree(input_files_Dir)
try:
os.mkdir(input_files_Dir)
except OSError:
print "ERROR: Cannot create project directory: " + input_files_Dir
raise
# Writing raw reference files into a specific input filename
input_ref_records = {}
for reference in ref_lst:
ref_records_ind = parse_contigs_ind(reference)
#ref_records = dict(ref_records_ind)
input_ref_records.update(ref_records_ind)
ref_records_ind.close()
#input_ref_records.update(ref_records)
ref_out_0 = input_files_Dir+"/reference0.fna"
if (sheared & bool(shear_val)):
with open(ref_out_0, "w") as handle:
SeqIO.write(genome_shredder(input_ref_records, shear_val).values(), handle, "fasta")
#NO NEED TO CLOSE with statement will automatically close the file
else:
with open(ref_out_0, "w") as handle:
SeqIO.write(input_ref_records.values(), handle, "fasta")
# Making BLAST databases
#output fname from before used as input for blast database creation
input_ref_0 = ref_out_0
title_db = name+"_db"#add iteration functionality
outfile_db = blast_db_Dir+"/iteration"+str(iterations)+"/"+name+"_db"#change into for loop
os.system("makeblastdb -in "+input_ref_0+" -dbtype nucl -title "+title_db+" -out "+outfile_db+" -parse_seqids")
# BLASTing query contigs
if not skip_blasting:
print "\nBLASTing query file(s):"
for i in range(len(mg_lst)):
database = outfile_db # adjust for iterations
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
start = time.time()
os_string = 'blastn -db '+database+' -query \"'+mg_lst[i]+'\" -out '+blasted_lst[i]+" -evalue "+str(e_val)+" -outfmt 6 -num_threads 8"
#print os_string
os.system(os_string)
print "\t"+mg_lst[i]+"; Time elapsed: "+str(time.time()-start)+" seconds."
else:
for i in range(len(mg_lst)):
blasted_lst.append(results_Dir+"/recruited_mg_"+str(i)+".tab")
# Parsing BLAST outputs
blast_cols = ['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
recruited_mg=[]
for i in range(len(mg_lst)):
try:
df = pandas.read_csv(blasted_lst[i] ,sep="\t", header=None)
except:
df = pandas.DataFrame(columns=blast_cols)
df.columns=blast_cols
recruited_mg.append(df)
# print len(recruited_mg[0])
# print len(recruited_mg[1])
#creating all_records entry
#! Remember to close index objects after they are no longer needed
#! Use helper function close_ind_lst()
all_records = []
all_input_recs = parse_contigs_ind(ref_out_0)
##calculating GC of the reference
if (len(all_input_recs)>1):
ref_gc_lst = np.array([GC(x.seq) for x in all_input_recs.values()])
ref_cnt = ref_gc_lst.size
ref_gc_avg = np.mean(ref_gc_lst)
ref_gc_avg_std = np.std(ref_gc_lst)
if(len(ref_gc_lst) > 0):
ref_gc_avg_sem = stats.sem(ref_gc_lst, axis=0)
else:
ref_gc_avg_sem=0
else:
if (debugging):
print "Only one reference"
ref_gc_lst = np.array([GC(x.seq) for x in all_input_recs.values()])
ref_cnt = ref_gc_lst.size
ref_gc_avg = np.mean(ref_gc_lst)
ref_gc_avg_std=0
ref_gc_avg_sem=0
#ref_gc_avg_sem = stats.sem(ref_gc_lst, axis=0)
# _ = 0
# for key, value in all_input_recs.items():
# _ +=1
# if _ < 20:
# print key, len(value)
print "\nIndexing metagenome file(s):"
for i in range(len(mg_lst)):
start = time.time()
all_records.append(parse_contigs_ind(mg_lst[i]))
print "\t"+mg_lst[i]+" Indexed in : "+str(time.time()-start)+" seconds."
# Transforming data
print "\nParsing recruited contigs:"
for i in range(len(mg_lst)):
start = time.time()
#cutoff_contigs[dataframe]=evalue_filter(cutoff_contigs[dataframe])
recruited_mg[i]=unique_scaffold_topBits(recruited_mg[i])
contig_list = recruited_mg[i]['quid'].tolist()
recruited_mg[i]['Contig_nt']=retrive_sequence(contig_list, all_records[i])
recruited_mg[i]['Contig_size']=recruited_mg[i]['Contig_nt'].apply(lambda x: len(x))
#recruited_mg[i]['Ref_nt']=recruited_mg[i]['suid'].apply(lambda x: all_input_recs[str(x)].seq)
recruited_mg[i]['Ref_size']=recruited_mg[i]['suid'].apply(lambda x: len(all_input_recs[str(x)]))
recruited_mg[i]['Ref_GC']=recruited_mg[i]['suid'].apply(lambda x: GC(all_input_recs[str(x)].seq))
#recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/min(recruited_mg[i]['Contig_size'].apply(lambda y: y),recruited_mg[i]['Ref_size'].apply(lambda z: z))
#df.loc[:, ['B0', 'B1', 'B2']].min(axis=1)
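        # Coverage is the alignment length as a percentage of the shorter of the
        # two sequences (contig vs. reference), and Metric = Coverage * iden / 100.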
recruited_mg[i]['Coverage']=recruited_mg[i]['alen'].apply(lambda x: 100.0*float(x))/recruited_mg[i].loc[:,["Contig_size", "Ref_size"]].min(axis=1)
recruited_mg[i]['Metric']=recruited_mg[i]['Coverage']*recruited_mg[i]['iden']/100.0
try:
recruited_mg[i]['Contig_GC']=recruited_mg[i]['Contig_nt'].apply(lambda x: GC(x))
except:
recruited_mg[i]['Contig_GC']=recruited_mg[i]['Contig_nt'].apply(lambda x: None)
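        # Per recruited contig, Read_RPKM is 1 / (reference length in kb *
        # metagenome size in millions of sequences); summing the column later
        # gives the total RPKM for that reference in this metagenome.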
try:
recruited_mg[i]['Read_RPKM']=1.0/((recruited_mg[i]['Ref_size']/1000.0)*(len(all_records[i])/1000000.0))
except:
recruited_mg[i]['Read_RPKM']=np.nan
#recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Ref_GC','Ref_nt','Contig_size','Contig_GC','Contig_nt']]
recruited_mg[i] = recruited_mg[i][['quid', 'suid', 'iden', 'alen','Coverage','Metric', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits','Ref_size','Ref_GC','Contig_size','Contig_GC','Read_RPKM','Contig_nt']]
print "\tContigs from "+mg_lst[i]+" parsed in : "+str(time.time()-start)+" seconds."
# Here would go statistics functions and producing plots
#
#
#
#
#
# Quality filtering before outputting
if alen_percent:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['Coverage']>=alen)&(recruited_mg[i]['eval']<=e_val)]
if alen_bp:
for i in range(len(recruited_mg)):
recruited_mg[i]=recruited_mg[i][(recruited_mg[i]['iden']>=iden)&(recruited_mg[i]['alen']>=alen)&(recruited_mg[i]['eval']<=e_val)]
# print len(recruited_mg[0])
# print len(recruited_mg[1])
# Batch export to outfmt (csv and/or multiple FASTA)
alen_str = ""
iden_str = "_iden_"+str(iden)+"%"
if alen_percent:
alen_str = "_alen_"+str(alen)+"%"
if alen_bp:
alen_str = "_alen_"+str(alen)+"bp"
if iterations > 1:
prefix=name+"/results/"+name.split("/")[0]+"_iter_e_"+str(e_val)+iden_str+alen_str
else:
prefix=name+"/results/"+name.split("/")[0]+"_e_"+str(e_val)+iden_str+alen_str
if sheared:
prefix = prefix+'_sheared_'+str(shear_val)+"bp"
prefix = prefix + "_recruited_mg_"
#initializing log file data
logfile=name.split("/")[0]+"/results_log.csv"
try:
run = int(name.split("/")[-1].split("_")[-1])# using "_" less depends on the wrapper script
except:
if name.split("/")[-1].split("_")[-1]==name:
run = 0
else:
print "Warning: Run identifier could not be written in: "+logfile
#sys.exit(1)
run = None
alen_header = "Min alen"
if alen_bp:
alen_header = alen_header+" (bp)"
if alen_percent:
alen_header = alen_header+" (%)"
shear_header = "Reference Shear (bp)"
shear_log_value = 0
if sheared:
shear_log_value = str(shear_val)
print "\nWriting files:"
for i in range(len(mg_lst)):
records= []
if "csv" in fmt_lst:
outfile1 = prefix+str(i)+".csv"
recruited_mg[i].to_csv(outfile1, sep='\t')
print str(len(recruited_mg[i]))+" sequences written to "+outfile1
if "fasta" in fmt_lst:
ids = recruited_mg[i]['quid'].tolist()
#if len(ids)==len(sequences):
for j in range(len(ids)):
records.append(all_records[i][ids[j]])
outfile2 = prefix+str(i)+".fasta"
with open(outfile2, "w") as output_handle:
SeqIO.write(records, output_handle, "fasta")
print str(len(ids))+" sequences written to "+outfile2
#Writing logfile
try:
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
except:
print "Warning: Time identifier could not be written in: "+logfile
metagenome = mg_lst[i]
#contig info
rpkm_lst = np.array(recruited_mg[i]['Read_RPKM'].tolist())
if(len(rpkm_lst) > 0):
rpkm = np.sum(rpkm_lst)
rpkm_std= np.std(rpkm_lst)
            rpkm_sem = np.std(rpkm_lst)/np.sqrt(len(rpkm_lst))  # SEM = std / sqrt(n)
else:
rpkm = 0
rpkm_std= 0
rpkm_sem=0
sizes_lst = np.array(recruited_mg[i]['Contig_size'].tolist())
if(len(sizes_lst) > 0):
sizes_avg = np.mean(sizes_lst)
sizes_avg_std= np.std(sizes_lst)
if(len(sizes_lst) > 1):
sizes_avg_sem = stats.sem(sizes_lst, axis=0)
else:
sizes_avg_sem = 0
else:
sizes_avg = 0
sizes_avg_std= 0
sizes_avg_sem=0
#sizes_avg_sem = stats.sem(sizes_lst, axis=0)
alen_lst = np.array(recruited_mg[i]['alen'].tolist())
if(len(alen_lst) > 0):
alen_avg = np.mean(alen_lst)
alen_avg_std = np.std(alen_lst)
if(len(alen_lst) > 1):
alen_avg_sem = stats.sem(alen_lst, axis=0)
else:
alen_avg_sem = 0
else:
alen_avg = 0
alen_avg_std = 0
alen_avg_sem=0
#alen_avg_sem = stats.sem(alen_lst, axis=0)
iden_lst = np.array(recruited_mg[i]['iden'].tolist())
if(len(iden_lst) > 0):
iden_avg = np.mean(iden_lst)
iden_avg_std = np.std(iden_lst)
if(len(iden_lst) > 1):
iden_avg_sem = stats.sem(iden_lst, axis=0)
else:
iden_avg_sem = 0
else:
iden_avg = 0
iden_avg_std = 0
iden_avg_sem=0
#iden_avg_sem = stats.sem(iden_lst, axis=0)
gc_lst = np.array(recruited_mg[i]['Contig_GC'].tolist())
if(len(gc_lst) > 0):
gc_avg = np.mean(gc_lst)
gc_avg_std = np.std(gc_lst)
if(len(gc_lst) > 1):
gc_avg_sem = stats.sem(gc_lst, axis=0)
else:
gc_avg_sem = 0
else:
gc_avg = 0
gc_avg_std = 0
gc_avg_sem=0
        if len(all_records[i]) > 0:  # guard the denominator below (metagenome may be empty)
recr_percent = float(len(ids))/float(len(all_records[i]))*100
else:
recr_percent = 0.0
#log_header = ['Run','Project Name','Created', 'Reference(s)','Metagenome', 'No. Contigs','No. References', alen_header, "Min iden (%)", shear_header, "Mean Contig Size (bp)","STD Contig Size", "SEM Contig Size", "Mean Contig alen (bp)","STD Contig alen", "SEM Contig alen", "Mean Contig iden (bp)","STD Contig iden", "SEM Contig iden", "Mean Contig GC (%)","STD Contig GC","SEM Contig GC","Mean Reference GC (%)","STD Reference GC","SEM Reference GC"]
log_header = ['Run','Project Name','Created', 'Reference(s)', shear_header,'No. Ref. Sequences','Metagenome','No. Metagenome Contigs' , alen_header, "Min iden (%)",'No. Recruited Contigs','% Recruited Contigs', 'Total RPKM', 'RPKM STD', 'RPKM SEM', "Mean Rec. Contig Size (bp)","STD Rec. Contig Size", "SEM Rec. Contig Size", "Mean alen (bp)","STD alen", "SEM alen", "Mean Rec. Contig iden (bp)","STD Rec. Contig iden", "SEM Rec. Contig iden", "Mean Rec. Contigs GC (%)","STD Rec. Contig GC","SEM Rec. Contig GC","Mean Total Reference(s) GC (%)","STD Total Reference(s) GC","SEM Total Reference(s) GC"]
#log_row = [run,name.split("/")[0],time_str, ";".join(ref_lst), metagenome, len(ids),ref_cnt, alen, iden, shear_log_value, sizes_avg,sizes_avg_std, sizes_avg_sem, alen_avg,alen_avg_std, alen_avg_sem, iden_avg,iden_avg_std, iden_avg_sem, gc_avg,gc_avg_std, gc_avg_sem,ref_gc_avg,ref_gc_avg_std, ref_gc_avg_sem]
log_row = [run,name.split("/")[0],time_str, ";".join(ref_lst), shear_log_value,ref_cnt, metagenome,len(all_records[i]) , alen, iden,len(ids),recr_percent,rpkm, rpkm_std, rpkm_sem, sizes_avg,sizes_avg_std, sizes_avg_sem, alen_avg,alen_avg_std, alen_avg_sem, iden_avg,iden_avg_std, iden_avg_sem, gc_avg,gc_avg_std, gc_avg_sem,ref_gc_avg,ref_gc_avg_std, ref_gc_avg_sem]
if os.path.isfile(logfile):#file exists - appending
with open(logfile, "a") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_row)
else:#no file exists - writing
with open(logfile,"w") as log_handle:
log_writer = csv.writer(log_handle, delimiter='\t')
log_writer.writerow(log_header)
log_writer.writerow(log_row)
close_ind_lst(all_records)
close_ind_lst([all_input_recs])
#run = 0
#all_records[i].close()# keep open if multiple iterations
#recruited_mg_1 = pandas.read_csv(out_name1 ,sep="\t", header=None)
#recruited_mg_1.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg_2 = pandas.read_csv(out_name2 ,sep="\t", header=None)
#recruited_mg_2.columns=['quid', 'suid', 'iden', 'alen', 'mism', 'gapo', 'qsta', 'qend', 'ssta', 'send', 'eval', 'bits']
#recruited_mg = [recruited_mg_1, recruited_mg_2]
# blast_db_Dir = ""
# results_Dir = ""
# input_files_Dir = ""
# parsed = SeqIO.parse(handle, "fasta")
#
# records = list()
#
#
# total = 0
# processed = 0
# for record in parsed:
# total += 1
# #print(record.id), len(record.seq)
# if len(record.seq) >= length:
# processed += 1
# records.append(record)
# handle.close()
#
# print "%d sequences found"%(total)
#
# try:
# output_handle = open(outfile, "w")
# SeqIO.write(records, output_handle, "fasta")
# output_handle.close()
# print "%d sequences written"%(processed)
# except:
# print "ERROR: Illegal output filename"
# sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
dmlc/mxnet | example/bayesian-methods/bdk_demo.py | 6 | 17001 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Run Stochastic Gradient Langevin Dynamics (SGLD) and Bayesian Dark Knowledge (BDK)"""
from __future__ import print_function
import argparse
import time
import numpy
import matplotlib.pyplot as plt
import mxnet as mx
import mxnet.ndarray as nd
from algos import HMC, SGD, SGLD, DistilledSGLD
from data_loader import load_mnist, load_toy, load_synthetic
from utils import BiasXavier, SGLDScheduler
class CrossEntropySoftmax(mx.operator.NumpyOp):
"""Calculate CrossEntropy softmax function"""
def __init__(self):
super(CrossEntropySoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
"""Generate helper functions to calculate the logarithm of softmax"""
def __init__(self):
super(LogSoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
# y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
# y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (numpy.exp(y) - l).astype('float32')
def classification_student_grad(student_outputs, teacher_pred):
return [student_outputs[0] - teacher_pred]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
student_mean = student_outputs[0]
student_var = student_outputs[1]
grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
+ 1.0 / teacher_noise_precision)) / 2
return [grad_mean, grad_var]
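# These gradients are consistent with the student minimizing a Gaussian negative
# log-likelihood of the teacher prediction, with student_outputs = [mean, log-variance]:
#     loss = 0.5 * exp(-s) * ((m - t)**2 + 1/tau) + 0.5 * s
# where m is the student mean, s its predicted log-variance, t the teacher
# prediction and tau the teacher noise precision; differentiating with respect
# to m and s reproduces grad_mean and grad_var above.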
def get_mnist_sym(output_op=None, num_hidden=400):
"""Get symbol of mnist"""
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
"""Get synthetic gradient value"""
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
grad[:] = grad_npy
return grad
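# synthetic_grad evaluates the (rescaled) stochastic gradient of the negative log
# posterior for the synthetic example of the SGLD paper: observations drawn from an
# equal-weight mixture of N(theta1, sigmax^2) and N(theta1 + theta2, sigmax^2),
# with independent N(0, sigma1^2) and N(0, sigma2^2) priors on theta1 and theta2.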
def get_toy_sym(teacher=True, teacher_noise_precision=None):
"""Get toy symbol"""
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net
def dev(gpu_id=None):
return mx.gpu(gpu_id) if gpu_id else mx.cpu()
def run_mnist_SGD(num_training=50000, gpu_id=None):
X, Y, X_test, Y_test = load_mnist(num_training)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, exe_params, _ = SGD(sym=net, dev=dev(gpu_id), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(num_training=50000, gpu_id=None):
X, Y, X_test, Y_test = load_mnist(num_training)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, sample_pool = SGLD(sym=net, dev=dev(gpu_id), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
"""Run DistilledSGLD on mnist dataset"""
X, Y, X_test, Y_test = load_mnist(num_training)
minibatch_size = 100
if num_training >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id))
def run_toy_SGLD(gpu_id=None):
"""Run SGLD on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
initializer = mx.init.Uniform(0.07)
exe, params, _ = SGLD(sym=net,
data_inputs=data_inputs,
X=X,
Y=Y,
X_test=X_test,
Y_test=Y_test,
total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size,
dev=dev(gpu_id)) # disable=unbalanced-tuple-unpacking
def run_toy_DistilledSGLD(gpu_id):
"""Run DistilledSGLD on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev(gpu_id))
def run_toy_HMC(gpu_id=None):
"""Run HMC on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev(gpu_id))
def run_synthetic_SGLD():
"""Run synthetic SGLD"""
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in range(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show()
if __name__ == '__main__':
numpy.random.seed(100)
mx.random.seed(100)
parser = argparse.ArgumentParser(
description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
"[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
parser.add_argument("-d", "--dataset", type=int, default=1,
help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
"the SGLD paper")
parser.add_argument("-l", "--algorithm", type=int, default=2,
help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
parser.add_argument("-t", "--training", type=int, default=50000,
help="Number of training samples")
parser.add_argument("--gpu", type=int, help="0 to use GPU, not set to use CPU")
args = parser.parse_args()
training_num = args.training
if args.dataset == 1:
if args.algorithm == 0:
run_mnist_SGD(training_num, gpu_id=args.gpu)
elif args.algorithm == 1:
run_mnist_SGLD(training_num, gpu_id=args.gpu)
else:
run_mnist_DistilledSGLD(training_num, gpu_id=args.gpu)
elif args.dataset == 0:
if args.algorithm == 1:
run_toy_SGLD(gpu_id=args.gpu)
elif args.algorithm == 2:
run_toy_DistilledSGLD(gpu_id=args.gpu)
elif args.algorithm == 3:
run_toy_HMC(gpu_id=args.gpu)
else:
run_synthetic_SGLD()
| apache-2.0 |
mhdella/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
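# Minimal sketch of the two APIs on a single token-frequency dict (illustrative
# only; the actual benchmark below runs on the 20 newsgroups data):
#
#     DictVectorizer().fit_transform([{"cat": 1, "dog": 2}])        # 1 x 2 sparse matrix
#     FeatureHasher(n_features=8).transform([{"cat": 1, "dog": 2}]) # 1 x 8 sparse matrix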
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
jeremyfix/pylearn2 | pylearn2/utils/image.py | 4 | 16346 | """
.. todo::
WRITEME
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A more sensible matplotlib-based image viewer command,
a wrapper around `matplotlib.pyplot.imshow`.
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
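# Example usage (hypothetical; needs a working matplotlib backend):
#     p = imview_async(np.random.rand(32, 32), window_title='noise')
#     p.join()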
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
#do some shape checking because PIL just raises a tuple indexing error
#that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
#don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
#PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:,:,0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
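    # For reference, the Python 2.7+ alternative mentioned above would be
    # (not used here, to keep the mkstemp-based approach):
    #     tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
    #     name = tmp.name
    #     tmp.close()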
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name +' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name +' ; rm ' + name,
shell=True)
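# A typical call (hypothetical; requires ${PYLEARN2_VIEWER_COMMAND} to be set,
# or the value 'inline' to fall back to imview):
#     show(np.zeros((32, 32, 3), dtype='uint8'))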
def pil_from_ndarray(ndarray):
"""
.. todo::
WRITEME
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
.. todo::
WRITEME
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
    image : ndarray
        3D array of shape (rows, cols, channels).
    shape : tuple
        Target (rows, cols); the image is scaled to be no larger than this.
    Returns
    -------
    ndarray
        The rescaled image, with the same dtype as `image`.
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
.. todo::
WRITEME
"""
assert type(filepath) == str
if rescale_image == False and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
# print 'image.load: ' + str((rval.min(), rval.max()))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
if numpy_rval.ndim not in [2,3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError("Tried to load an image, got an array with " +
str(numpy_rval.ndim)+" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format."
)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
.. todo::
WRITEME
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
    ndar : ndarray
        Array whose values will be rescaled.
    eps : float
        Small constant added to the maximum to avoid division by zero.
    Returns
    -------
    ndarray
        A copy of `ndar` with values scaled to the interval [0, 1].
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
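# Worked example: scale_to_unit_interval(np.array([2., 4., 6.])) returns
# approximately [0., 0.5, 1.] (shift by the minimum, then divide by the new maximum).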
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
x : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
Whether or not the values need to be before being plotted to [0, 1].
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
aewhatley/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
mblondel/soft-dtw | sdtw/tests/test_soft_dtw.py | 1 | 2056 | import numpy as np
from scipy.optimize import approx_fprime
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sdtw.path import gen_all_paths
from sdtw.distance import SquaredEuclidean
from sdtw import SoftDTW
# Generate two inputs randomly.
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = rng.randn(6, 4)
D = euclidean_distances(X, Y, squared=True)
def _softmax(z):
max_val = np.max(z)
return max_val + np.log(np.exp(z - max_val).sum())
def _softmin(z, gamma):
z = np.array(z)
return -gamma * _softmax(-z / gamma)
def _soft_dtw_bf(D, gamma):
costs = [np.sum(A * D) for A in gen_all_paths(D.shape[0], D.shape[1])]
return _softmin(costs, gamma)
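# _soft_dtw_bf is a brute-force reference implementation: soft-DTW is the soft
# minimum, softmin_gamma(z) = -gamma * log(sum_i exp(-z_i / gamma)), taken over
# the costs <A, D> of all monotonic alignment matrices A enumerated by
# gen_all_paths, while SoftDTW computes the same value by dynamic programming.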
def test_soft_dtw():
for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
assert_almost_equal(SoftDTW(D, gamma).compute(),
_soft_dtw_bf(D, gamma=gamma))
def test_soft_dtw_grad():
def make_func(gamma):
def func(d):
D_ = d.reshape(*D.shape)
return SoftDTW(D_, gamma).compute()
return func
for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
sdtw = SoftDTW(D, gamma)
sdtw.compute()
E = sdtw.grad()
func = make_func(gamma)
E_num = approx_fprime(D.ravel(), func, 1e-6).reshape(*E.shape)
assert_array_almost_equal(E, E_num, 5)
def test_soft_dtw_grad_X():
def make_func(gamma):
def func(x):
X_ = x.reshape(*X.shape)
D_ = SquaredEuclidean(X_, Y)
return SoftDTW(D_, gamma).compute()
return func
for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
dist = SquaredEuclidean(X, Y)
sdtw = SoftDTW(dist, gamma)
sdtw.compute()
E = sdtw.grad()
G = dist.jacobian_product(E)
func = make_func(gamma)
G_num = approx_fprime(X.ravel(), func, 1e-6).reshape(*G.shape)
assert_array_almost_equal(G, G_num, 5)
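def test_softmin_approaches_min():
    # Illustrative sanity check: for small gamma the soft minimum should be
    # close to the hard minimum of the costs.
    z = [3.0, 1.0, 2.0]
    assert_almost_equal(_softmin(z, 1e-3), min(z), 3)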
| bsd-2-clause |
toobaz/pandas | pandas/tests/sparse/test_indexing.py | 2 | 38613 | import numpy as np
import pytest
import pandas as pd
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseSeriesIndexing:
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name="xxx")
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name="xxx")
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name="xxx")
res = s[::2]
exp = pd.SparseSeries(
[0, 2, 4, 6], index=[0, 2, 4, 6], fill_value=0, name="xxx"
)
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
assert sparse.loc["A"] == 1
assert np.isnan(sparse.loc["B"])
result = sparse.loc[["A", "C", "D"]]
exp = orig.loc[["A", "C", "D"]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc["A"] == 1
assert np.isnan(sparse.loc["B"])
result = sparse.loc[["A", "C", "D"]]
exp = orig.loc[["A", "C", "D"]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.loc["C":], orig.loc["C":].to_sparse(fill_value=0)
)
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.iloc[2:], orig.iloc[2:].to_sparse(fill_value=0)
)
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("abcde"))
sparse = orig.to_sparse()
assert sparse.at["a"] == orig.at["a"]
assert np.isnan(sparse.at["b"])
assert np.isnan(sparse.at["c"])
assert sparse.at["d"] == orig.at["d"]
assert np.isnan(sparse.at["e"])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("abcde"))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at["a"] == orig.at["a"]
assert np.isnan(sparse.at["b"])
assert sparse.at["c"] == orig.at["c"]
assert sparse.at["d"] == orig.at["d"]
assert sparse.at["e"] == orig.at["e"]
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"))
assert s.get("A") == 1
assert np.isnan(s.get("B"))
assert s.get("C") == 0
assert s.get("XX") is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list("ABCDE"), fill_value=0)
assert s.get("A") == 1
assert np.isnan(s.get("B"))
assert s.get("C") == 0
assert s.get("XX") is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]), orig.take([0]).to_sparse())
tm.assert_sp_series_equal(
sparse.take([0, 1, 3]), orig.take([0, 1, 3]).to_sparse()
)
tm.assert_sp_series_equal(
sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse()
)
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(
sparse.take([0]), orig.take([0]).to_sparse(fill_value=0)
)
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(["B", "E", "C"])
exp = orig.reindex(["B", "E", "C"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse()
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(["A", "B", "C"])
exp = orig.reindex(["A", "B", "C"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list("ABCDE"))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "E", "C", "D"])
exp = orig.reindex(["A", "E", "C", "D"]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1.0, 2.0, 3.0]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2.0, 3.0, 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype="float64")).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method="nearest")
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method="nearest", tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method="nearest", tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
@pytest.mark.parametrize("kind", ["integer", "block"])
@pytest.mark.parametrize("fill", [True, False, np.nan])
def tests_indexing_with_sparse(self, kind, fill):
# see gh-13985
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill, dtype=bool)
expected = arr[indexer]
result = pd.SparseArray([1, 3], kind=kind)
tm.assert_sp_array_equal(result, expected)
s = pd.SparseSeries(arr, index=["a", "b", "c"], dtype=np.float64)
expected = pd.SparseSeries(
[1, 3],
index=["a", "c"],
kind=kind,
dtype=SparseDtype(np.float64, s.fill_value),
)
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
tm.assert_sp_series_equal(s.iloc[indexer], expected)
indexer = pd.SparseSeries(indexer, index=["a", "b", "c"])
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
s.iloc[indexer]
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
        # MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples(
[("A", 0), ("A", 1), ("B", 0), ("C", 0), ("C", 1)]
)
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == orig[0]
assert np.isnan(sparse[1])
assert sparse[3] == orig[3]
tm.assert_sp_series_equal(sparse["A"], orig["A"].to_sparse())
tm.assert_sp_series_equal(sparse["B"], orig["B"].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse["C", 0] == orig["C", 0]
assert np.isnan(sparse["A", 1])
assert np.isnan(sparse["B", 0])
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
def test_loc(self):
        # needs to be overridden to use different labels
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc["A"], orig.loc["A"].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B"], orig.loc["B"].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# single element list (GH 15447)
result = sparse.loc[["A"]]
exp = orig.loc[["A"]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc["C", 0] == orig.loc["C", 0]
assert np.isnan(sparse.loc["A", 1])
assert np.isnan(sparse.loc["B", 0])
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc["A":], orig.loc["A":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["B":], orig.loc["B":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["C":], orig.loc["C":].to_sparse())
tm.assert_sp_series_equal(sparse.loc["A":"B"], orig.loc["A":"B"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:"B"], orig.loc[:"B"].to_sparse())
def test_reindex(self):
# GH 15447
orig = self.orig
sparse = self.sparse
res = sparse.reindex([("A", 0), ("C", 1)])
exp = orig.reindex([("A", 0), ("C", 1)]).to_sparse()
tm.assert_sp_series_equal(res, exp)
# On specific level:
res = sparse.reindex(["A", "C", "B"], level=0)
exp = orig.reindex(["A", "C", "B"], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
# single element list (GH 15447)
res = sparse.reindex(["A"], level=0)
exp = orig.reindex(["A"], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
with pytest.raises(TypeError):
# Incomplete keys are not accepted for reindexing:
sparse.reindex(["A", "C"])
# "copy" argument:
res = sparse.reindex(sparse.index, copy=True)
exp = orig.reindex(orig.index, copy=True).to_sparse()
tm.assert_sp_series_equal(res, exp)
assert sparse is not res
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseDataFrameIndexing:
def test_getitem(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse["x"], orig["x"].to_sparse())
tm.assert_sp_frame_equal(sparse[["x"]], orig[["x"]].to_sparse())
tm.assert_sp_frame_equal(sparse[["z", "x"]], orig[["z", "x"]].to_sparse())
tm.assert_sp_frame_equal(
sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse(),
)
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], orig.iloc[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
result = sparse[["z"]]
expected = orig[["z"]].to_sparse(fill_value=0)
tm.assert_sp_frame_equal(result, expected, check_fill_value=False)
tm.assert_sp_series_equal(sparse["y"], orig["y"].to_sparse(fill_value=0))
exp = orig[["x"]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[["x"]], exp)
exp = orig[["z", "x"]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[["z", "x"]], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.loc[0, "x"] == 1
assert np.isnan(sparse.loc[1, "z"])
assert sparse.loc[2, "z"] == 4
# have to specify `kind='integer'`, since we construct a
# new SparseArray here, and the default sparse type is
# integer there, but block in SparseSeries
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse(kind="integer"))
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse(kind="integer"))
tm.assert_sp_series_equal(
sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ["x", "z"]]
exp = orig.loc[:, ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ["x", "z"]]
exp = orig.loc[[0, 2], ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
index=list("abc"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.loc["a", "x"] == 1
assert np.isnan(sparse.loc["b", "z"])
assert sparse.loc["c", "z"] == 4
tm.assert_sp_series_equal(
sparse.loc["a"], orig.loc["a"].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b"], orig.loc["b"].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
result = sparse.loc[["a", "b"]]
exp = orig.loc[["a", "b"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[["a", "b"], :]
exp = orig.loc[["a", "b"], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ["x", "z"]]
exp = orig.loc[:, ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[["c", "a"], ["x", "z"]]
exp = orig.loc[["c", "a"], ["x", "z"]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]])
sparse = orig.to_sparse()
assert sparse.iloc[1, 1] == 3
assert np.isnan(sparse.iloc[2, 0])
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse(kind="integer"))
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse(kind="integer"))
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame(
[[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.at["A", "x"] == orig.at["A", "x"]
assert np.isnan(sparse.at["B", "z"])
assert np.isnan(sparse.at["C", "y"])
assert sparse.at["D", "x"] == orig.at["D", "x"]
def test_at_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
assert sparse.at["A", "x"] == orig.at["A", "x"]
assert np.isnan(sparse.at["B", "z"])
assert np.isnan(sparse.at["C", "y"])
assert sparse.at["D", "x"] == orig.at["D", "x"]
def test_iat(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_iat_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_take(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]), orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]), orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]), orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse()
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame(
[[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
index=list("ABCD"),
columns=list("xyz"),
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame(
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
index=list("ABCD"),
columns=list("xyz"),
            dtype=int,
)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(["A", "C", "B"])
exp = orig.reindex(["A", "C", "B"]).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestMultitype:
def setup_method(self, method):
self.cols = ["string", "int", "float", "object"]
self.string_series = pd.SparseSeries(["a", "b", "c"])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame(
{
"string": self.string_series,
"int": self.int_series,
"float": self.float_series,
"object": self.object_series,
}
)
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(["a", 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
assert row.dtype == SparseDtype(object)
tm.assert_sp_series_equal(
self.sdf["string"], self.string_series, check_names=False
)
tm.assert_sp_series_equal(self.sdf["int"], self.int_series, check_names=False)
tm.assert_sp_series_equal(
self.sdf["float"], self.float_series, check_names=False
)
tm.assert_sp_series_equal(
self.sdf["object"], self.object_series, check_names=False
)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(
self.sdf.iloc[0],
pd.SparseSeries(["a", 1, 1.1, []], index=self.cols),
check_names=False,
)
tm.assert_sp_series_equal(
self.sdf.iloc[1],
pd.SparseSeries(["b", 2, 1.2, {}], index=self.cols),
check_names=False,
)
tm.assert_sp_series_equal(
self.sdf.iloc[2],
pd.SparseSeries(["c", 3, 1.3, set()], index=self.cols),
check_names=False,
)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(
self.sdf.iloc[[1, 2]],
pd.SparseDataFrame(
{
"string": self.string_series.iloc[[1, 2]],
"int": self.int_series.iloc[[1, 2]],
"float": self.float_series.iloc[[1, 2]],
"object": self.object_series.iloc[[1, 2]],
},
index=[1, 2],
)[self.cols],
)
tm.assert_sp_frame_equal(
self.sdf[["int", "string"]],
pd.SparseDataFrame({"int": self.int_series, "string": self.string_series}),
)
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
assert self.ss.iloc[i] == self.ss[idx]
tm.assert_class_equal(self.ss.iloc[i], self.ss[idx], obj="series index")
assert self.ss["string"] == "a"
assert self.ss["int"] == 1
assert self.ss["float"] == 1.1
assert self.ss["object"] == []
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(
self.ss.loc[["string", "int"]],
pd.SparseSeries(["a", 1], index=["string", "int"]),
)
tm.assert_sp_series_equal(
self.ss.loc[["string", "object"]],
pd.SparseSeries(["a", []], index=["string", "object"]),
)
| bsd-3-clause |
Winand/pandas | pandas/tests/io/parser/test_read_fwf.py | 11 | 16032 | # -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
def test_whitespace_preservation(self):
# Addresses Issue #16772
data_expected = """
a ,bbb
cc,dd """
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a bbb
ccdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0], delimiter="\n\t")
tm.assert_frame_equal(result, expected)
def test_default_delimiter(self):
data_expected = """
a,bbb
cc,dd"""
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/linear_model/tests/test_omp.py | 3 | 8944 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 25, 35, 5, 3
y, X, gamma = make_sparse_coded_signal(n_samples=n_targets,
n_components=n_features,
n_features=n_samples,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
# Make X not of norm 1 for testing
X *= 10
y *= 10
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert (orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape ==
(n_features,))
assert (orthogonal_mp(X, y, n_nonzero_coefs=5).shape ==
(n_features, 3))
def test_correct_shapes_gram():
assert (orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape ==
(n_features,))
assert (orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape ==
(n_features, 3))
def test_n_nonzero_coefs():
assert np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)) <= 5
assert np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5,
precompute=True)) <= 5
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol
assert np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
warning_message = (
"Orthogonal matching pursuit ended prematurely "
"due to linear dependence in the dictionary. "
"The requested precision might not have been met."
)
with pytest.warns(RuntimeWarning, match=warning_message):
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0, precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
@pytest.mark.parametrize("positional_params", [(X, y), (G, Xy)])
@pytest.mark.parametrize(
"keyword_params",
[{"tol": -1}, {"n_nonzero_coefs": -1}, {"n_nonzero_coefs": n_features + 1}]
)
def test_bad_input(positional_params, keyword_params):
with pytest.raises(ValueError):
orthogonal_mp(*positional_params, **keyword_params)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_orthogonal_mp_gram_readonly():
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/5956
idx, = gamma[:, 0].nonzero()
G_readonly = G.copy()
G_readonly.setflags(write=False)
Xy_readonly = Xy.copy()
Xy_readonly.setflags(write=False)
gamma_gram = orthogonal_mp_gram(G_readonly, Xy_readonly[:, 0],
n_nonzero_coefs=5,
copy_Gram=False, copy_Xy=False)
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert omp.coef_.shape == (n_features,)
assert omp.intercept_.shape == ()
assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs
omp.fit(X, y)
assert omp.coef_.shape == (n_targets, n_features)
assert omp.intercept_.shape == (n_targets,)
assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs
coef_normalized = omp.coef_[0].copy()
omp.set_params(fit_intercept=True, normalize=False)
omp.fit(X, y[:, 0])
assert_array_almost_equal(coef_normalized, omp.coef_)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert np.count_nonzero(omp.coef_) <= n_nonzero_coefs
assert omp.coef_.shape == (n_features,)
assert omp.intercept_ == 0
omp.fit(X, y)
assert omp.coef_.shape == (n_targets, n_features)
assert omp.intercept_ == 0
assert np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
warning_message = (
"Orthogonal matching pursuit ended prematurely "
"due to linear dependence in the dictionary. "
"The requested precision might not have been met."
)
with pytest.warns(RuntimeWarning, match=warning_message):
orthogonal_mp(newX, newy, n_nonzero_coefs=2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, n_nonzero_coefs=2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, n_nonzero_coefs=2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty,
n_nonzero_coefs=1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty,
n_nonzero_coefs=1)
assert np.all(gamma_empty == 0)
assert np.all(gamma_empty_gram == 0)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert path.shape == (n_features, n_targets, 5)
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert path.shape == (n_features, n_targets, 5)
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert path.shape == (n_features, n_targets, 5)
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10)
ompcv.fit(X, y_)
assert ompcv.n_nonzero_coefs_ == n_nonzero_coefs
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
ltiao/scikit-learn | sklearn/neural_network/rbm.py | 11 | 12298 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
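    # Reference note (comment added for clarity; not in the original code): the
    # expression above is the usual closed form of the RBM free energy,
    #     F(v) = - b_vis . v - sum_j log(1 + exp(W_j . v + b_hid_j)),
    # i.e. a visible-bias term plus a softplus over the hidden pre-activations,
    # which is exactly what `safe_sparse_dot` and `np.logaddexp(0, ...)` compute.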
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
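    # Illustrative sketch (not part of the original source): repeated calls to
    # `gibbs` advance a Markov chain, which is how approximate samples from a
    # fitted model could be drawn, e.g.
    #     # rbm = BernoulliRBM(n_components=64).fit(X)
    #     # v = X[:1].copy()
    #     # for _ in range(1000):   # burn-in of the chain
    #     #     v = rbm.gibbs(v)
    # after which `v` is (approximately) a draw from the model's P(v).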
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
                order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
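    # For reference (comment added; not part of the original implementation):
    # the updates above are the SML / persistent CD-1 gradient step,
    #     W     += lr * ( <v h>_data - <v h>_model )
    #     b_hid += lr * ( <h>_data   - <h>_model   )
    #     b_vis += lr * ( <v>_data   - <v>_model   )
    # with the model-side statistics estimated from the persistent fantasy
    # particles stored in `self.h_samples_`, not from a chain restarted at the
    # data as plain CD-1 would do.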
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
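    # Reference note (added comment, not original): the value returned above is
    # the stochastic pseudo-likelihood proxy
    #     PL(v) ~= n_features * log( sigmoid( F(v_corrupted) - F(v) ) ),
    # with one randomly flipped visible unit per sample, so repeated calls on
    # the same X can return slightly different values.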
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
exowanderer/ExoplanetTSO | wanderer/auxiliary.py | 1 | 31292 | # from astroML.plotting import hist
from astropy.io import fits
from astropy.modeling import models, fitting
from datetime import datetime
from functools import partial
# from image_registration import cross_correlation_shifts
from glob import glob
from lmfit import Model, Parameters
# from matplotlib.ticker import MaxNLocator
# from matplotlib import style, colors
from multiprocessing import cpu_count, Pool
from os import listdir, path, mkdir, chdir
from pandas import DataFrame, Series, read_csv, read_pickle, scatter_matrix
from photutils import CircularAperture, CircularAnnulus, aperture_photometry, findstars
# from least_asymmetry.asym import actr
from numpy import sort, linspace, indices, nanmedian as median, nanmean as mean, nanstd as std, empty, transpose, ceil
from numpy import concatenate, pi, sqrt, ones, diag, inf, isnan, isfinite, array, nanmax, shape, zeros
from numpy import min as npmin, max as npmax, zeros, arange, sum, isnan, hstack, int32, exp, log
from numpy import int32 as npint, round as npround, nansum as sum, std as std, where, bitwise_and, vstack
# from seaborn import *
from scipy.special import erf
from scipy import stats
from sklearn.cluster import DBSCAN
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from skimage.filters import gaussian as gaussianFilter
from socket import gethostname
from statsmodels.robust import scale
from statsmodels.nonparametric import kde
from sys import exit
from time import time, localtime, sleep
from tqdm import tqdm, tqdm_notebook
from scipy import optimize
import numpy as np
y,x = 0,1
'''Start: From least_asymmetry.asym by N.Lust (github.com/natelust/least_asymmetry) and modified (reversed XY -> YX)'''
def gaussian(height, center_y, center_x, width_y, width_x, offset, yy, xx):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""Returns a gaussian function with the given parameters"""
width_y = float(width_y)
width_x = float(width_x)
return height*np.exp(-0.5*(((center_y-yy)/width_y)**2+((center_x-xx)/width_x)**2))+offset
def moments(data, kernel_size=2):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""Returns (height, x, y, width_x, width_y,offset)
the gaussian parameters of a 2D distribution by calculating its
moments """
total = data.sum()
Y, X = indices(data.shape)
y = (Y*data).sum()/total
x = (X*data).sum()/total
height = gaussianFilter(data, kernel_size).max()
firstq = median(data[data < median(data)])
thirdq = median(data[data > median(data)])
offset = median(data[np.where(np.bitwise_and(data > firstq,
data < thirdq))])
places = where((data-offset) > 4*std(data[where(np.bitwise_and(data > firstq, data < thirdq))]))
width_y = std(places[0])
width_x = std(places[1])
# These if statements take into account there might only be one significant
# point above the background when that is the case it is assumend the width
# of the gaussian must be smaller than one pixel
if width_y == 0.0: width_y = 0.5
if width_x == 0.0: width_x = 0.5
height -= offset
return height, y, x, width_y, width_x, offset
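# Clarifying note (added; not part of the original module): the estimates above
# are flux-weighted centers of mass,
#     y_c = sum(y * I) / sum(I),   x_c = sum(x * I) / sum(I),
# and the widths are the standard deviations of the pixel coordinates that lie
# well above the estimated background `offset`.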
def fitgaussian(data, weights=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""Returns (height, y, x, width_y, width_x)
the gaussian parameters of a 2D distribution found by a fit
Weights must be the same size as the data, but every point
contains the value of the weight of the pixel"""
if isinstance(weights, type(False)):
weights = np.ones(data.shape, dtype=float)
elif weights.dtype != np.dtype('float'):
weights = np.array(weights, dtype=float)
params = moments(data)
yy,xx = indices(data.shape)
gausspartial = partial(gaussian, yy=yy, xx=xx)
errorfunction = lambda p: np.ravel((gausspartial(*p) - data)*weights)
params, success = optimize.leastsq(errorfunction, params)
return params
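# Usage sketch (illustrative only; the numbers below are invented):
#     # yy, xx = indices((32, 32))
#     # fake = gaussian(1000., 15.2, 16.8, 1.3, 1.5, 10., yy, xx)
#     # height, yc, xc, wy, wx, off = fitgaussian(fake)
# The returned tuple uses the same ordering as `moments`, so it can be passed
# straight back into `gaussian` to rebuild a model image.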
def center_of_light(data, weights=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
if isinstance(weights, type(False)):
weights = np.ones(data.shape, dtype=float)
elif weights.dtype != np.dtype('float'):
weights = np.array(weights, dtype=float)
ny, nx = np.indices(data.shape)
return [sum(weights*ny*data)/sum(weights*data), sum(weights*nx*data)/sum(weights*data)]
def get_julian_date_from_gregorian_date(*date):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""gd2jd.py converts a UT Gregorian date to Julian date.
Functions for JD <-> GD conversion,
courtesy of Ian Crossfield at
http://www.astro.ucla.edu/~ianc/python/_modules/date.html
Downloaded from Marshall Perrin Github at
https://github.com/mperrin/misc_astro/blob/master/idlastro_ports/gd2jd.py
Usage: gd2jd.py (2009, 02, 25, 01, 59, 59)
To get the current Julian date:
import time
gd2jd(time.gmtime())
Hours, minutes and/or seconds can be omitted -- if so, they are
assumed to be zero.
Year and month are converted to type INT, but all others can be
type FLOAT (standard practice would suggest only the final element
of the date should be float)
"""
verbose=False
if verbose: print(date)
date = list(date)
if len(date)<3:
print("You must enter a date of the form (2009, 02, 25)!")
return -1
elif len(date)==3:
for ii in range(3): date.append(0)
elif len(date)==4:
for ii in range(2): date.append(0)
elif len(date)==5:
date.append(0)
yyyy = int(date[0])
mm = int(date[1])
dd = float(date[2])
hh = float(date[3])
min = float(date[4])
sec = float(date[5])
UT=hh+min/60+sec/3600
total_seconds=hh*3600+min*60+sec
fracday=total_seconds/86400
if (100*yyyy+mm-190002.5)>0:
sig=1
else:
sig=-1
JD = 367*yyyy - int(7*(yyyy+int((mm+9)/12))/4) + int(275*mm/9) + dd + 1721013.5 + UT/24 - 0.5*sig +0.5
months=["January", "February", "March", "April", "May", "June", "July", "August",
"September", "October", "November", "December"]
# Now calculate the fractional year. Do we have a leap year?
daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]
if (yyyy%4 != 0):
days=daylist # not divisible by 4: not a leap year
elif (yyyy%400 == 0):
days=daylist2
elif (yyyy%100 == 0):
days=daylist
else:
days=daylist2
daysum=0
for y in range(mm-1):
daysum=daysum+days[y]
daysum=daysum+dd-1+UT/24
if days[1]==29:
fracyear=yyyy+daysum/366
else:
fracyear=yyyy+daysum/365
if verbose:
print(yyyy,mm,dd,hh,min,sec)
print("UT="+UT)
print("Fractional day: %f" % fracday)
print("\n"+months[mm-1]+" %i, %i, %i:%i:%i UT = JD %f" % (dd, yyyy, hh, min, sec, JD), end= " ")
print(" = " + fracyear+"\n")
return JD
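# Hedged sketch: converting a UT Gregorian date to a Julian date with the
# helper above (value approximate):
#
#   jd = get_julian_date_from_gregorian_date(2009, 2, 25, 1, 59, 59)
#   # jd is roughly 2454887.583
#
#   import time
#   jd_now = get_julian_date_from_gregorian_date(*time.gmtime()[:6])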
# In[ ]:
def get_julian_date_from_header(header):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
# These are specific to STScI standards -- may vary on the ground
fitsDate = header['DATE-OBS']
startTimeStr= header['TIME-OBS']
endTimeStr = header['TIME-END']
yyyy, mm , dd = fitsDate.split('-')
hh1 , mn1, ss1 = array(startTimeStr.split(':')).astype(float)
hh2 , mn2, ss2 = array(endTimeStr.split(':')).astype(float)
yyyy = float(yyyy)
mm = float(mm)
dd = float(dd)
hh1 = float(hh1)
mn1 = float(mn1)
ss1 = float(ss1)
hh2 = float(hh2)
mn2 = float(mn2)
ss2 = float(ss2)
startDate = get_julian_date_from_gregorian_date(yyyy,mm,dd,hh1,mn1,ss1)
endDate = get_julian_date_from_gregorian_date(yyyy,mm,dd,hh2,mn2,ss2)
return startDate, endDate
def clipOutlier(oneDarray, nSig=8):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
medarray = np.median(oneDarray)
stdarray = np.std(oneDarray)
outliers = abs(oneDarray - medarray) > nSig*stdarray
oneDarray[outliers]= np.median(oneDarray[~outliers])
return oneDarray
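# Hedged sketch: `clipOutlier` replaces >nSig outliers with the median of the
# remaining points (note: it modifies the input array in place).
#
#   flux = np.random.normal(1.0, 0.01, 1000)
#   flux[123] = 10.0                      # inject a cosmic-ray-like spike
#   cleaned = clipOutlier(flux, nSig=8)   # flux[123] is now ~1.0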
def flux_weighted_centroid(image, ypos, xpos, bSize = 7):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
'''
Flux-weighted centroiding (Knutson et al. 2008)
xpos and ypos are the rounded pixel positions of the star
'''
ypos, xpos, bSize = np.int32([ypos, xpos, bSize])
## extract a box around the star:
#im = a[ypos-bSize:ypos+bSize, xpos-bSize:xpos+bSize].copy()
subImage = image[ypos-bSize:ypos+bSize, xpos-bSize:xpos+bSize].transpose().copy()
y,x = 0,1
ydim = subImage.shape[y]
xdim = subImage.shape[x]
## add up the flux along x and y
xflux = zeros(xdim)
xrng = arange(xdim)
yflux = zeros(ydim)
yrng = arange(ydim)
for i in range(xdim):
xflux[i] = sum(subImage[i,:])
for j in range(ydim):
yflux[j] = sum(subImage[:,j])
## get the flux weighted average position:
ypeak = sum(yflux * yrng) / sum(yflux) + ypos - float(bSize)
xpeak = sum(xflux * xrng) / sum(xflux) + xpos - float(bSize)
return (ypeak, xpeak)
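# Hedged sketch: flux-weighted centroiding around a rough integer (y, x)
# guess, inside a box of half-width bSize pixels:
#
#   ypeak, xpeak = flux_weighted_centroid(frame, ypos=10, xpos=10, bSize=7)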
def gaussian(height, center_y, center_x, width_y, width_x, offset, yy, xx):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""Returns a gaussian function with the given parameters"""
width_x = float(width_x)
width_y = float(width_y)
chiY = (center_y - yy) / width_y
chiX = (center_x - xx) / width_x
return height * exp(-0.5*(chiY**2 + chiX**2)) + offset
def moments(data):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
"""Returns (height, x, y, width_x, width_y,offset)
the gaussian parameters of a 2D distribution by calculating its
moments """
total = data.sum()
X, Y = indices(data.shape)
x = (X*data).sum()/total
y = (Y*data).sum()/total
height = data.max()
firstq = median(data[data < median(data)])
thirdq = median(data[data > median(data)])
offset = median(data[where(bitwise_and(data > firstq,
data < thirdq))])
places = where((data-offset) > 4*std(data[where(bitwise_and(
data > firstq, data < thirdq))]))
width_y = std(places[0])
width_x = std(places[1])
# These if statements handle the case where there is only one significant
# point above the background; in that case the width of the Gaussian is
# assumed to be smaller than one pixel
if width_y == 0.0:
width_y = 0.5
if width_x == 0.0:
width_x = 0.5
height -= offset
return height, y, x, width_y, width_x, offset
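# Hedged sketch: `moments` gives a quick (height, y, x, width_y, width_x,
# offset) estimate that `fitgaussian` then refines by least squares:
#
#   init = moments(frame)
#   refined = fitgaussian(frame)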
def lame_lmfit_gaussian_centering(imageCube, yguess=15, xguess=15, subArraySize=10, init_params=None, nSig=False, useMoments=False, method='leastsq'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
imageSize = imageCube.shape[1]
nparams = 6
if init_params is None:
useMoments = True
init_params = moments(imageCube[0])
ihg, iyc, ixc, iyw, ixw, ibg = arange(nparams)
lmfit_init_params = Parameters()
lmfit_init_params.add_many(
('height' , init_params[ihg], True , 0.0 , inf ),
('center_y', init_params[iyc], True , 0.0 , imageSize),
('center_x', init_params[ixc], True , 0.0 , imageSize),
('width_y' , init_params[iyw], True , 0.0 , imageSize),
('width_x' , init_params[ixw], True , 0.0 , imageSize),
('offset' , init_params[ibg], True))
gfit_model = Model(gaussian, independent_vars=['yy', 'xx'])
yy0, xx0 = indices(imageCube[0].shape)
npix = subArraySize//2
ylower = yguess - npix
yupper = yguess + npix
xlower = xguess - npix
xupper = xguess + npix
ylower, xlower, yupper, xupper = int32([ylower, xlower, yupper, xupper])
yy = yy0[ylower:yupper, xlower:xupper]
xx = xx0[ylower:yupper, xlower:xupper]
nFrames = imageCube.shape[0]
heights, ycenters, xcenters, ywidths, xwidths, offsets = zeros((nparams, nFrames))
for k, image in enumerate(imageCube):
subFrameNow = image[ylower:yupper, xlower:xupper]
subFrameNow[isnan(subFrameNow)] = median(subFrameNow)
subFrameNow = gaussianFilter(subFrameNow, nSig) if not isinstance(nSig, bool) else subFrameNow
init_params = moments(subFrameNow) if useMoments else init_params
gfit_res = gfit_model.fit(subFrameNow, params=lmfit_init_params, xx=xx, yy=yy, method=method)
heights[k] = gfit_res.best_values['height']
ycenters[k] = gfit_res.best_values['center_y']
xcenters[k] = gfit_res.best_values['center_x']
ywidths[k] = gfit_res.best_values['width_y']
xwidths[k] = gfit_res.best_values['width_x']
offsets[k] = gfit_res.best_values['offset']
return heights, ycenters, xcenters, ywidths, xwidths, offsets
def lmfit_one_center(image, yy, xx, gfit_model, lmfit_init_params, yupper, ylower, xupper, xlower, useMoments=True, nSig=False, method='leastsq'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
subFrameNow = image[ylower:yupper, xlower:xupper]
subFrameNow[isnan(subFrameNow)] = median(subFrameNow)
subFrameNow = gaussianFilter(subFrameNow, nSig) if not isinstance(nSig, bool) else subFrameNow
init_params = moments(subFrameNow) if useMoments else list(lmfit_init_params.valuesdict().values())
nparams = 6
ihg, iyc, ixc, iyw, ixw, ibg = arange(nparams)
# lmfit Parameters are dict-like; update the stored values rather than
# setting plain attributes
lmfit_init_params['height'].value = init_params[ihg]
lmfit_init_params['center_y'].value = init_params[iyc]
lmfit_init_params['center_x'].value = init_params[ixc]
lmfit_init_params['width_y'].value = init_params[iyw]
lmfit_init_params['width_x'].value = init_params[ixw]
lmfit_init_params['offset'].value = init_params[ibg]
# print(lmfit_init_params)
gfit_res = gfit_model.fit(subFrameNow, params=lmfit_init_params, xx=xx, yy=yy, method=method)
# print(list(gfit_res.best_values.values()))
fit_values = gfit_res.best_values
return fit_values['center_y'], fit_values['center_x'], fit_values['width_y'], fit_values['width_x'], fit_values['height'], fit_values['offset']
def fit_gauss(subFrameNow, xinds, yinds, initParams, print_compare=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
# initParams = (height, x, y, width_x, width_y, offset)
fit_lvmq = fitting.LevMarLSQFitter()
model0 = models.Gaussian2D(amplitude=initParams[0], x_mean=initParams[1], y_mean=initParams[2],
x_stddev=initParams[3], y_stddev=initParams[4], theta=0.0) + models.Const2D(amplitude=initParams[5])
model1 = fit_lvmq(model0, xinds, yinds, subFrameNow)
model1 = fit_lvmq(model1, xinds, yinds, subFrameNow)
if print_compare:
print(model1.amplitude_0 - initParams[0], end=" ")
print(model1.x_mean_0 - initParams[1], end=" ")
print(model1.y_mean_0 - initParams[2], end=" ")
print(model1.x_stddev_0 - initParams[3], end=" ")
print(model1.y_stddev_0 - initParams[4], end=" ")
print(model1.amplitude_1 - initParams[5])
return model1.parameters
def fit_one_center(image, ylower, yupper, xlower, xupper, nSig=False, method='gauss', bSize = 7):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
subFrameNow = image[ylower:yupper, xlower:xupper]
subFrameNow[isnan(subFrameNow)] = median(subFrameNow[~isnan(subFrameNow)])
subFrameNow = gaussianFilter(subFrameNow, nSig) if not isinstance(nSig, bool) else subFrameNow
if method == 'cmom':
return np.array(moments(subFrameNow)) # H, Xc, Yc, Xs, Ys, O
if method == 'gauss':
return fitgaussian(subFrameNow)#, xinds, yinds, np.copy(cmom)) # H, Xc, Yc, Xs, Ys, Th, O
if method == 'fwc':
return flux_weighted_centroid(image, image.shape[0]//2, image.shape[1]//2, bSize = bSize)[::-1]
def compute_flux_one_frame(image, center, background, aperRad=3.0):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
frameNow = image - background
frameNow[np.isnan(frameNow)] = median(frameNow)
aperture = CircularAperture([center[1], center[0]], r=abs(aperRad)) # center is (y, x)
return aperture_photometry(frameNow, aperture)['aperture_sum'].data[0]
def measure_one_circle_bg(image, center, aperRad, metric, apMethod='exact'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
aperture = CircularAperture(center, aperRad)
aper_mask = aperture.to_mask(method=apMethod)[0] # list of ApertureMask objects (one for each position)
# backgroundMask = abs(aperture.get_fractions(np.ones(self.imageCube[0].shape))-1)
backgroundMask = aper_mask.to_image(image.shape).astype(bool)
backgroundMask = ~backgroundMask#[backgroundMask == 0] = False
return metric(image[backgroundMask])
def measure_one_annular_bg(image, center, innerRad, outerRad, metric, apMethod='exact'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
innerAperture = CircularAperture(center, innerRad)
outerAperture = CircularAperture(center, outerRad)
inner_aper_mask = innerAperture.to_mask(method=apMethod)[0]
inner_aper_mask = inner_aper_mask.to_image(image.shape).astype(bool)
outer_aper_mask = outerAperture.to_mask(method=apMethod)[0]
outer_aper_mask = outer_aper_mask.to_image(image.shape).astype(bool)
backgroundMask = (~inner_aper_mask)*outer_aper_mask
return metric(image[backgroundMask])
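# --------------------------------------------------------------------------
# Hedged sketch (not in the original source): sky subtraction plus aperture
# photometry on a 2D numpy array `image`, assuming photutils and numpy (np)
# are imported as above. The centroid helpers in this module return (y, x)
# pairs while photutils expects (x, y); a symmetric example value sidesteps
# that ordering question here.
#
#   center = (15.0, 15.0)
#   sky = measure_one_annular_bg(image, center, innerRad=8, outerRad=12,
#                                metric=np.median)
#   flux = compute_flux_one_frame(image, center, sky, aperRad=3.0)
# --------------------------------------------------------------------------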
from numpy import median, std
def measure_one_median_bg(image, center, aperRad, metric, nSig, apMethod='exact'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
aperture = CircularAperture(center, aperRad)
aperture = aperture.to_mask(method=apMethod)[0]
aperture = aperture.to_image(image.shape).astype(bool)
backgroundMask = ~aperture
medFrame = median(image[backgroundMask])
madFrame = std(image[backgroundMask])
medianMask= abs(image - medFrame) < nSig*madFrame
maskComb = medianMask*backgroundMask
return median(image[maskComb])
def measure_one_KDE_bg(image, center, aperRad, metric, apMethod='exact'):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
aperture = CircularAperture(center, aperRad)
aperture = aperture.to_mask(method=apMethod)[0]
aperture = aperture.to_image(image.shape).astype(bool)
backgroundMask = ~aperture
kdeFrame = kde.KDEUnivariate(image[backgroundMask])
kdeFrame.fit()
return kdeFrame.support[kdeFrame.density.argmax()]
def measure_one_background(image, center, aperRad, metric, apMethod='exact', bgMethod='circle', nSig=5):
"""Generic background estimator: build a circular or annular background
mask (depending on whether `aperRad` is a scalar or an (inner, outer)
pair), optionally sigma-clip at `nSig` ('median') or take the KDE mode
('kde'), and apply `metric` to the remaining background pixels."""
if np.ndim(aperRad) == 0:
aperture = CircularAperture(center, aperRad)
aperture = aperture.to_mask(method=apMethod)[0] # list of ApertureMask objects (one for each position)
aperture = ~aperture.to_image(image.shape).astype(bool) # inverse to keep 'outside' aperture
else:
innerRad, outerRad = aperRad
innerAperture = CircularAperture(center, innerRad)
outerAperture = CircularAperture(center, outerRad)
inner_aper_mask = innerAperture.to_mask(method=apMethod)[0]
inner_aper_mask = inner_aper_mask.to_image(image.shape).astype(bool)
outer_aper_mask = outerAperture.to_mask(method=apMethod)[0]
outer_aper_mask = outer_aper_mask.to_image(image.shape).astype(bool)
aperture = (~inner_aper_mask)*outer_aper_mask
if bgMethod == 'median':
medFrame = median(image[aperture])
madFrame = scale.mad(image[aperture])
medianMask= abs(image - medFrame) < nSig*madFrame
aperture = medianMask*aperture
if bgMethod == 'kde':
kdeFrame = kde.KDEUnivariate(image[aperture].ravel())
kdeFrame.fit()
return kdeFrame.support[kdeFrame.density.argmax()]
return metric(image[aperture])
def DBScan_Flux(phots, ycenters, xcenters, dbsClean=0, useTheForce=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
dbsPhots = DBSCAN()#n_jobs=-1)
stdScaler = StandardScaler()
phots = np.copy(phots.ravel())
phots[~np.isfinite(phots)] = np.median(phots[np.isfinite(phots)])
featuresNow = np.transpose([stdScaler.fit_transform(ycenters[:,None]).ravel(), \
stdScaler.fit_transform(xcenters[:,None]).ravel(), \
stdScaler.fit_transform(phots[:,None]).ravel() ] )
# print(featuresNow.shape)
dbsPhotsPred= dbsPhots.fit_predict(featuresNow)
return dbsPhotsPred == dbsClean
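# Hedged sketch: clustering-based outlier flagging for a photometric time
# series (phots, ycenters, xcenters are equal-length 1D numpy arrays).
# `keep` is True for frames that land in the main (clean) DBSCAN cluster.
#
#   keep = DBScan_Flux(phots, ycenters, xcenters, dbsClean=0)
#   clean_phots = phots[keep]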
def factor(numberToFactor, arr=list()):
i = 2
maximum = numberToFactor / 2 + 1
while i < maximum:
if numberToFactor % i == 0:
return factor(numberToFactor/i,arr + [i])
i += 1
return list(set(arr + [numberToFactor]))
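# Hedged sketch: `factor` returns the distinct prime factors of an integer
# (duplicates collapse because the result passes through set(); under true
# division the residual factor may come back as a float):
#
#   factor(60)   # -> roughly [2, 3, 5], in some order
#   factor(97)   # -> [97] for a prime input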
def DBScan_Segmented_Flux(phots, ycenters, xcenters, dbsClean=0, nSegments=None, maxSegment = int(6e4), useTheForce=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
if phots.size <= maxSegment:
# Default to un-segmented
return DBScan_Flux(phots, ycenters, xcenters, dbsClean=dbsClean, useTheForce=useTheForce)
dbsPhots = DBSCAN()#n_jobs=-1)
stdScaler = StandardScaler()
if nSegments is None:
nSegments = phots.size // maxSegment
segSize = phots.size // nSegments
max_in_segs = nSegments * segSize
segments = list(np.arange(max_in_segs).reshape(nSegments, -1))
leftovers= np.arange(max_in_segs, phots.size)
segments[-1] = np.hstack([segments[-1], leftovers])
phots = np.copy(phots.ravel())
phots[~np.isfinite(phots)] = np.median(phots[np.isfinite(phots)])
dbsPhotsPred = np.zeros(phots.size) + dbsClean # default to array of `dbsClean` values
for segment in segments:
dbsPhotsPred[segment]= DBScan_Flux(phots[segment], ycenters[segment], xcenters[segment], dbsClean=dbsClean, useTheForce=useTheForce)
return dbsPhotsPred == dbsClean
def DBScan_PLD(PLDNow, dbsClean=0, useTheForce=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
dbsPLD = DBSCAN()#n_jobs=-1)
stdScaler = StandardScaler()
dbsPLDPred= dbsPLD.fit_predict(stdScaler.fit_transform(PLDNow[:,None]))
return dbsPLDPred == dbsClean
def DBScan_Segmented_PLD(PLDNow, dbsClean=0, nSegments=None, maxSegment = int(6e4), useTheForce=False):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
if PLDNow.size <= maxSegment:
# Default to un-segmented
return DBScan_PLD(PLDNow, dbsClean=dbsClean, useTheForce=useTheForce)
# dbsPLD = DBSCAN()#n_jobs=-1)
# stdScaler = StandardScaler()
#
if nSegments is None:
nSegments = PLDNow.size // maxSegment
segSize = PLDNow.size // nSegments
max_in_segs = nSegments * segSize
segments = list(np.arange(max_in_segs).reshape(nSegments, -1))
leftovers= np.arange(max_in_segs, PLDNow.size)
segments[-1] = np.hstack([segments[-1], leftovers])
dbsPLDPred = np.zeros(PLDNow.size) + dbsClean # default to array of `dbsClean` values
for segment in segments:
dbsPLDPred[segment]= DBScan_PLD(PLDNow[segment], dbsClean=dbsClean, useTheForce=useTheForce)
return dbsPLDPred == dbsClean
def cross_correlation_HST_diff_NDR():
"""Cross-correlate the differential non-destructive reads from HST scanning
mode (WFC3, G141) against the first exposure and plot the measured shifts."""
import image_registration as ir
from glob import glob
from pylab import plot, ion;ion()
from astropy.io import fits
fitsfiles = glob("*ima*fits")
ylow = 50
yhigh = 90
nExts = 36
extGap = 5
shifts_ndr = zeros((len(fitsfiles), (nExts-1)//extGap, 2))
fitsfile0 = fits.open(fitsfiles[0])
for kf, fitsfilenow in enumerate(fitsfiles):
fitsnow = fits.open(fitsfilenow)
for kndr in range(extGap+1,nExts+1)[::extGap][::-1]:
fits0_dndrnow = fitsfile0[kndr-extGap].data[ylow:yhigh]- fitsfile0[kndr].data[ylow:yhigh]
fitsN_dndrnow = fitsnow[kndr-extGap].data[ylow:yhigh] - fitsnow[kndr].data[ylow:yhigh]
shifts_ndr[kf][(kndr-1)//extGap-1] = ir.chi2_shift(fits0_dndrnow, fitsN_dndrnow)[:2]
#ax.clear()
#plt.imshow(fitsN_dndrnow)
#ax.set_aspect('auto')
#plt.pause(1e-3)
plot(shifts_ndr[:,:-1,0],'o') # x-shifts
plot(shifts_ndr[:,:-1,1],'o') # y-shifts
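# Hedged usage note: the routine above expects to be run from a directory
# containing HST `*ima*fits` files, needs the optional image_registration
# package, and returns nothing (it plots the x/y shifts).
#
#   import os
#   os.chdir('/path/to/ima/files')        # hypothetical data directory
#   cross_correlation_HST_diff_NDR()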
| gpl-3.0 |
evgchz/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
nils-werner/SimpleCV | SimpleCV/ImageClass.py | 8 | 519091 | # Load required libraries
from SimpleCV.base import *
from SimpleCV.Color import *
from SimpleCV.LineScan import *
from numpy import int32
from numpy import uint8
import cv2
from EXIF import *
if not init_options_handler.headless:
import pygame as pg
import scipy.ndimage as ndimage
import scipy.stats.stats as sss #for auto white balance
import scipy.cluster.vq as scv
import scipy.linalg as nla # for linear algebra / least squares
import math # math... who does that
import copy # for deep copy
#import scipy.stats.mode as spsmode
class ColorSpace:
"""
**SUMMARY**
The colorspace class is used to encapsulate the color space of a given image.
This class acts like a C/C++-style enumerated type.
See: http://stackoverflow.com/questions/2122706/detect-color-space-with-opencv
"""
UNKNOWN = 0
BGR = 1
GRAY = 2
RGB = 3
HLS = 4
HSV = 5
XYZ = 6
YCrCb = 7
class ImageSet(list):
"""
**SUMMARY**
This is an abstract class for keeping a list of images. It has a few
advantages in that you can use it to auto load data sets from a directory
or the net.
Keep in mind it inherits from list, so it also has all the functionality
of a normal Python list.
**EXAMPLES**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show(ninjas)
or you can load a directory path:
>>> imgs = ImageSet('/path/to/imgs/')
>>> imgs.show()
This will download and show a bunch of random ninjas. If you want to
save all those images locally then just use:
>>> imgs.save()
You can also load up the sample images that come with simplecv as:
>>> imgs = ImageSet('samples')
>>> imgs.filelist
>>> logo = imgs.find('simplecv.png')
**TO DO**
Eventually this should allow us to pull image urls / paths from csv files.
The method should also allow us to associate an arbitrary bunch of data with
each image, and on load/save pickle that data or write it to a CSV file.
"""
filelist = None
def __init__(self, directory = None):
if not directory:
return
if isinstance(directory,list):
if isinstance(directory[0], Image):
super(ImageSet,self).__init__(directory)
elif isinstance(directory[0], str) or isinstance(directory[0], unicode):
super(ImageSet,self).__init__(map(Image, directory))
elif directory.lower() == 'samples' or directory.lower() == 'sample':
pth = LAUNCH_PATH
pth = os.path.realpath(pth)
directory = os.path.join(pth, 'sampleimages')
self.load(directory)
else:
self.load(directory)
def download(self, tag=None, number=10, size='thumb'):
"""
**SUMMARY**
This function downloads images from Google Image search based
on the tag you provide. The number is the number of images you
want to have in the list. Valid values for size are 'thumb', 'small',
'medium', 'large' or a tuple of exact dimensions i.e. (640,480).
Note that 'thumb' is considerably faster than the others.
.. Warning::
This requires the python library Beautiful Soup to be installed
http://www.crummy.com/software/BeautifulSoup/
**PARAMETERS**
* *tag* - A string of tag values you would like to download.
* *number* - An integer of the number of images to try and download.
* *size* - the size of the images to download. Valid options a tuple
of the exact size or a string of the following approximate sizes:
* thumb ~ less than 128x128
* small ~ approximately less than 640x480 but larger than 128x128
* medium ~ approximately less than 1024x768 but larger than 640x480.
* large ~ > 1024x768
**RETURNS**
Nothing - but caches local copy of images.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show(ninjas)
"""
try:
from BeautifulSoup import BeautifulSoup
except:
print "You need to install Beatutiul Soup to use this function"
print "to install you can use:"
print "easy_install beautifulsoup"
return
INVALID_SIZE_MSG = """I don't understand what size images you want.
Valid options: 'thumb', 'small', 'medium', 'large'
or a tuple of exact dimensions i.e. (640,480)."""
if isinstance(size, basestring):
size = size.lower()
if size == 'thumb':
size_param = ''
elif size == 'small':
size_param = '&tbs=isz:s'
elif size == 'medium':
size_param = '&tbs=isz:m'
elif size == 'large':
size_param = '&tbs=isz:l'
else:
print INVALID_SIZE_MSG
return None
elif type(size) == tuple:
width, height = size
size_param = '&tbs=isz:ex,iszw:' + str(width) + ',iszh:' + str(height)
else:
print INVALID_SIZE_MSG
return None
# Used to extract imgurl parameter value from a URL
imgurl_re = re.compile('(?<=(&|\?)imgurl=)[^&]*((?=&)|$)')
add_set = ImageSet()
candidate_count = 0
while len(add_set) < number:
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
url = ("http://www.google.com/search?tbm=isch&q=" + urllib2.quote(tag) +
size_param + "&start=" + str(candidate_count))
page = opener.open(url)
soup = BeautifulSoup(page)
img_urls = []
# Gets URLs of the thumbnail images
if size == 'thumb':
imgs = soup.findAll('img')
for img in imgs:
dl_url = str(dict(img.attrs)['src'])
img_urls.append(dl_url)
# Gets the direct image URLs
else:
for link_tag in soup.findAll('a', {'href': re.compile('imgurl=')}):
dirty_url = link_tag.get('href') # URL to an image as given by Google Images
dl_url = str(re.search(imgurl_re, dirty_url).group()) # The direct URL to the image
img_urls.append(dl_url)
for dl_url in img_urls:
try:
add_img = Image(dl_url, verbose=False)
# Don't know a better way to check if the image was actually returned
if add_img.height != 0 and add_img.width != 0:
add_set.append(add_img)
except:
#do nothing
None
if len(add_set) >= number:
break
self.extend(add_set)
def upload(self,dest,api_key=None,api_secret=None, verbose = True):
"""
**SUMMARY**
Uploads all the images to imgur or flickr or dropbox. In verbose mode URL values are printed.
**PARAMETERS**
* *api_key* - a string of the API key.
* *api_secret* - (required only for flickr and dropbox ) a string of the API secret.
* *verbose* - If verbose is true all values are printed to the screen
**RETURNS**
if uploading is successful
- Imgur return the original image URL on success and None if it fails.
- Flick returns True on success, else returns False.
- dropbox returns True on success.
**EXAMPLE**
TO upload image to imgur::
>>> imgset = ImageSet("/home/user/Desktop")
>>> result = imgset.upload( 'imgur',"MY_API_KEY1234567890" )
>>> print "Uploaded To: " + result[0]
To upload image to flickr::
>>> imgset.upload('flickr','api_key','api_secret')
>>> imgset.upload('flickr') #Once the api keys and secret keys are cached.
To upload image to dropbox::
>>> imgset.upload('dropbox','api_key','api_secret')
>>> imgset.upload('dropbox') #Once the api keys and secret keys are cached.
**NOTES**
.. Warning::
This method requires two packages to be installed
-PyCurl
-flickr api.
-dropbox
.. Warning::
You must supply your own API key.
Find more about API keys:
- http://imgur.com/register/api_anon
- http://www.flickr.com/services/api/misc.api_keys.html
- https://www.dropbox.com/developers/start/setup#python
"""
try :
for i in self:
i.upload(dest,api_key,api_secret, verbose)
return True
except :
return False
def show(self, showtime = 0.25):
"""
**SUMMARY**
This is a quick way to show all the items in a ImageSet.
The time is in seconds. You can also provide a decimal value, so
showtime can be 1.5, 0.02, etc.
to show each image.
**PARAMETERS**
* *showtime* - the time, in seconds, to show each image in the set.
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show()
"""
for i in self:
i.show()
time.sleep(showtime)
def _get_app_ext(self, loops=0):
""" Application extention. Part that secifies amount of loops.
if loops is 0, if goes on infinitely.
"""
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
if loops == 0:
loops = 2**16-1
bb += int_to_bin(loops)
bb += '\x00' # end
return bb
def _get_graphics_control_ext(self, duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
bb += '\x08' # no transparency
bb += int_to_bin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def _write_gif(self, filename, duration=0.1, loops=0, dither=1):
""" Given a set of images writes the bytes to the specified stream.
"""
frames = 0
previous = None
fp = open(filename, 'wb')
if not PIL_ENABLED:
logger.warning("Need PIL to write animated gif files.")
return
converted = []
for img in self:
if not isinstance(img,pil.Image):
pil_img = img.getPIL()
else:
pil_img = img
converted.append((pil_img.convert('P',dither=dither), img._get_header_anim()))
try:
for img, header_anim in converted:
if not previous:
# gather data
palette = getheader(img)[1]
data = getdata(img)
imdes, data = data[0], data[1:]
header = header_anim
appext = self._get_app_ext(loops)
graphext = self._get_graphics_control_ext(duration)
# write global header
fp.write(header)
fp.write(palette)
fp.write(appext)
# write image
fp.write(graphext)
fp.write(imdes)
for d in data:
fp.write(d)
else:
# gather info (compress difference)
data = getdata(img)
imdes, data = data[0], data[1:]
graphext = self._get_graphics_control_ext(duration)
# write image
fp.write(graphext)
fp.write(imdes)
for d in data:
fp.write(d)
previous = img.copy()
frames = frames + 1
fp.write(";") # end gif
finally:
fp.close()
return frames
def save(self, destination=None, dt=0.2, verbose = False, displaytype=None):
"""
**SUMMARY**
This is a quick way to save all the images in a data set.
Or to Display in webInterface.
If you didn't specify a path one will randomly be generated.
To see the location the files are being saved to then pass
verbose = True.
**PARAMETERS**
* *destination* - path to which images should be saved, or name of gif
* file. If this ends in .gif, the pictures will be saved accordingly.
* *dt* - time between frames, for creating gif files.
* *verbose* - print the path of the saved files to the console.
* *displaytype* - the method use for saving or displaying images.
valid values are:
* 'notebook' - display to the ipython notebook.
* None - save to a temporary file.
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.save(destination="ninjas_folder", verbose=True)
>>> imgs.save(destination="ninjas.gif", verbose=True)
"""
if displaytype=='notebook':
try:
from IPython.core.display import Image as IPImage
except ImportError:
print "You need IPython Notebooks to use this display mode"
return
from IPython.core import display as Idisplay
for i in self:
tf = tempfile.NamedTemporaryFile(suffix=".png")
loc = tf.name
tf.close()
i.save(loc)
Idisplay.display(IPImage(filename=loc))
return
else:
if destination:
if destination.endswith(".gif"):
return self._write_gif(destination, dt)
else:
for i in self:
i.save(path=destination, temp=True, verbose=verbose)
else:
for i in self:
i.save(verbose=verbose)
def showPaths(self):
"""
**SUMMARY**
This shows the file paths of all the images in the set.
If they haven't been saved to disk then they will not have a filepath
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.save(verbose=True)
>>> imgs.showPaths()
**TO DO**
This should return paths as a list too.
"""
for i in self:
print i.filename
def _read_gif(self, filename):
""" read_gif(filename)
Reads images from an animated GIF file. Returns the number of images loaded.
"""
if not PIL_ENABLED:
return
elif not os.path.isfile(filename):
return
pil_img = pil.open(filename)
pil_img.seek(0)
pil_images = []
try:
while True:
pil_images.append(pil_img.copy())
pil_img.seek(pil_img.tell()+1)
except EOFError:
pass
loaded = 0
for img in pil_images:
self.append(Image(img))
loaded += 1
return loaded
def load(self, directory = None, extension = None, sort_by=None):
"""
**SUMMARY**
This function loads up files automatically from the directory you pass
it. If you give it an extension it will only load that extension
otherwise it will try to load all known file types in that directory.
extension should be in the format:
extension = 'png'
**PARAMETERS**
* *directory* - The path or directory from which to load images.
* *extension* - The extension to use. If none is given png is the default.
* *sort_by* - Sort the directory based on one of the following parameters passed as strings.
* *time* - the modification time of the file.
* *name* - the name of the file.
* *size* - the size of the file.
The default behavior is to leave the directory unsorted.
**RETURNS**
The number of images in the image set.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.load("images/faces")
>>> imgs.load("images/eyes", "png")
"""
if not directory:
logger.warning("You need to give a directory to load files from.")
return
if not os.path.exists(directory):
logger.warning( "Invalid image path given.")
return
if extension:
#regexes to ignore case
regexList = [ '[' + letter + letter.upper() + ']' for letter in extension]
regex = ''.join(regexList)
regex = "*." + regex
formats = [os.path.join(directory, regex)]
else:
formats = [os.path.join(directory, x) for x in IMAGE_FORMATS]
file_set = [glob.glob(p) for p in formats]
full_set = []
for f in file_set:
for i in f:
full_set.append(i)
file_set = full_set
if(sort_by is not None):
if( sort_by.lower() == "time"):
file_set = sorted(file_set,key=os.path.getmtime)
if( sort_by.lower() == "name"):
file_set = sorted(file_set)
if( sort_by.lower() == "size"):
file_set = sorted(file_set,key=os.path.getsize)
self.filelist = dict()
for i in file_set:
tmp = None
try:
tmp = Image(i)
if( tmp is not None and tmp.width > 0 and tmp.height > 0):
if sys.platform.lower() == 'win32' or sys.platform.lower() == 'win64':
self.filelist[tmp.filename.split('\\')[-1]] = tmp
else:
self.filelist[tmp.filename.split('/')[-1]] = tmp
self.append(tmp)
except:
continue
return len(self)
def standardize(self,width,height):
"""
**SUMMARY**
Resize every image in the set to a standard size.
**PARAMETERS**
* *width* - the width that we want for every image in the set.
* *height* - the height that we want for every image in the set.
**RETURNS**
A new image set where every image in the set is scaled to the desired size.
**EXAMPLE**
>>>> iset = ImageSet("./b/")
>>>> thumbnails = iset.standardize(64,64)
>>>> for t in thumbnails:
>>>> t.show()
"""
retVal = ImageSet()
for i in self:
retVal.append(i.resize(width,height))
return retVal
def dimensions(self):
"""
**SUMMARY**
Return an np.array that are the width and height of every image in the image set.
**PARAMETERS**
--NONE--
**RETURNS**
A 2xN numpy array where N is the number of images in the set. The first column is
the width, and the second column is the height.
**EXAMPLE**
>>> iset = ImageSet("./b/")
>>> sz = iset.dimensions()
>>> np.max(sz[:,0]) # returns the largest width in the set.
"""
retVal = []
for i in self:
retVal.append((i.width,i.height))
return np.array(retVal)
def average(self, mode="first", size=(None,None)):
"""
**SUMMARY**
Casts each in the image set into a 32F image, averages them together and returns the results.
If the images are different sizes the method attempts to standardize them.
**PARAMETERS**
* *mode* -
* "first" - resize everything to the size of the first image.
* "max" - resize everything to be the max width and max height of the set.
* "min" - resize everything to be the min width and min height of the set.
* "average" - resize everything to be the average width and height of the set
* "fixed" - fixed, use the size tuple provided.
* *size* - if the mode is set to fixed use this tuple as the size of the resulting image.
**RETURNS**
Returns a single image that is the average of all the values.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.load("images/faces")
>>> result = imgs.average(mode="first")
>>> result.show()
**TODO**
* Allow the user to pass in an offset parameter that blits the images into the result.
"""
fw = 0
fh = 0
# figure out how we will handle everything
if( len(self) <= 0 ):
return ImageSet()
vals = self.dimensions()
if( mode.lower() == "first" ):
fw = self[0].width
fh = self[0].height
elif( mode.lower() == "fixed" ):
fw = size[0]
fh = size[1]
elif( mode.lower() == "max" ):
fw = np.max(vals[:,0])
fh = np.max(vals[:,1])
elif( mode.lower() == "min" ):
fw = np.min(vals[:,0])
fh = np.min(vals[:,1])
elif( mode.lower() == "average" ):
fw = int(np.average(vals[:,0]))
fh = int(np.average(vals[:,1]))
#determine if we really need to resize the images
t1 = np.sum(vals[:,0]-fw)
t2 = np.sum(vals[:,1]-fh)
if( t1 != 0 or t2 != 0 ):
resized = self.standardize(fw,fh)
else:
resized = self
# Now do the average calculation
accumulator = cv.CreateImage((fw,fh), cv.IPL_DEPTH_8U,3)
cv.Zero(accumulator)
alpha = float(1.0/len(resized))
beta = float((len(resized)-1.0)/len(resized))
for i in resized:
cv.AddWeighted(i.getBitmap(),alpha,accumulator,beta,0,accumulator)
retVal = Image(accumulator)
return retVal
def __getitem__(self,key):
"""
**SUMMARY**
Returns a ImageSet when sliced. Previously used to
return list. Now it is possible to ImageSet member
functions on sub-lists
"""
if type(key) is types.SliceType: #Or can use 'try:' for speed
return ImageSet(list.__getitem__(self, key))
else:
return list.__getitem__(self,key)
def __getslice__(self, i, j):
"""
Deprecated since python 2.0, now using __getitem__
"""
return self.__getitem__(slice(i,j))
class Image:
"""
**SUMMARY**
The Image class is the heart of SimpleCV and allows you to convert to and
from a number of source types with ease. It also has intelligent buffer
management, so that modified copies of the Image required for algorithms
such as edge detection, etc can be cached and reused when appropriate.
Images are converted into 8-bit, 3-channel images in RGB colorspace. It will
automatically handle conversion from other representations into this
standard format. If dimensions are passed, an empty image is created.
**EXAMPLE**
>>> i = Image("/path/to/image.png")
>>> i = Camera().getImage()
You can also just load the SimpleCV logo using:
>>> img = Image("simplecv")
>>> img = Image("logo")
>>> img = Image("logo_inverted")
>>> img = Image("logo_transparent")
Or you can load an image from a URL:
>>> img = Image("http://www.simplecv.org/image.png")
"""
width = 0 #width and height in px
height = 0
depth = 0
filename = "" #source filename
filehandle = "" #filehandle if used
camera = ""
_mLayers = []
_mDoHuePalette = False
_mPaletteBins = None
_mPalette = None
_mPaletteMembers = None
_mPalettePercentages = None
_barcodeReader = "" #property for the ZXing barcode reader
#these are buffer frames for various operations on the image
_bitmap = "" #the bitmap (iplimage) representation of the image
_matrix = "" #the matrix (cvmat) representation
_grayMatrix = "" #the gray scale (cvmat) representation -KAS
_graybitmap = "" #a reusable 8-bit grayscale bitmap
_equalizedgraybitmap = "" #the above bitmap, normalized
_blobLabel = "" #the label image for blobbing
_edgeMap = "" #holding reference for edge map
_cannyparam = (0, 0) #parameters that created _edgeMap
_pil = "" #holds a PIL object in buffer
_numpy = "" #numpy form buffer
_grayNumpy = "" # grayscale numpy for keypoint stuff
_colorSpace = ColorSpace.UNKNOWN #Colorspace Object
_pgsurface = ""
_cv2Numpy = None #numpy array for OpenCV >= 2.3
_cv2GrayNumpy = None #grayscale numpy array for OpenCV >= 2.3
_gridLayer = [None,[0,0]]#to store grid details | Format -> [gridIndex , gridDimensions]
#For DFT Caching
_DFT = [] #an array of 2 channel (real,imaginary) 64F images
#Keypoint caching values
_mKeyPoints = None
_mKPDescriptors = None
_mKPFlavor = "NONE"
#temp files
_tempFiles = []
#when we empty the buffers, populate with this:
_initialized_buffers = {
"_bitmap": "",
"_matrix": "",
"_grayMatrix": "",
"_graybitmap": "",
"_equalizedgraybitmap": "",
"_blobLabel": "",
"_edgeMap": "",
"_cannyparam": (0, 0),
"_pil": "",
"_numpy": "",
"_grayNumpy":"",
"_pgsurface": "",
"_cv2GrayNumpy": "",
"_cv2Numpy":""}
#The variables _uncroppedX and _uncroppedY are used to buffer the points when we crop the image.
_uncroppedX = 0
_uncroppedY = 0
def __repr__(self):
if len(self.filename) == 0:
fn = "None"
else:
fn = self.filename
return "<SimpleCV.Image Object size:(%d, %d), filename: (%s), at memory location: (%s)>" % (self.width, self.height, fn, hex(id(self)))
#initialize the frame
#parameters: source designation (filename)
#todo: handle camera/capture from file cases (detect on file extension)
def __init__(self, source = None, camera = None, colorSpace = ColorSpace.UNKNOWN,verbose=True, sample=False, cv2image=False, webp=False):
"""
**SUMMARY**
The constructor takes a single polymorphic parameter, which it tests
to see how it should convert into an RGB image. Supported types include:
**PARAMETERS**
* *source* - The source of the image. This can be just about anything, a numpy arrray, a file name, a width and height
tuple, a url. Certain strings such as "lenna" or "logo" are loaded automatically for quick testing.
* *camera* - A camera to pull a live image.
* *colorspace* - A default camera color space. If none is specified this will usually default to the BGR colorspace.
* *sample* - This is set to true if you want to load some of the included sample images without having to specify the complete path
**EXAMPLES**
>>> img = Image('simplecv')
>>> img = Image('test.png')
>>> img = Image('http://www.website.com/my_image.jpg')
>>> img.show()
**NOTES**
OpenCV: iplImage and cvMat types
Python Image Library: Image type
Filename: All opencv supported types (jpg, png, bmp, gif, etc)
URL: The source can be a url, but must include the http://
"""
self._mLayers = []
self.camera = camera
self._colorSpace = colorSpace
#Keypoint Descriptors
self._mKeyPoints = []
self._mKPDescriptors = []
self._mKPFlavor = "NONE"
#Pallete Stuff
self._mDoHuePalette = False
self._mPaletteBins = None
self._mPalette = None
self._mPaletteMembers = None
self._mPalettePercentages = None
#Temp files
self._tempFiles = []
#Check if need to load from URL
#(this can be made shorter)if type(source) == str and (source[:7].lower() == "http://" or source[:8].lower() == "https://"):
if isinstance(source, basestring) and (source.lower().startswith("http://") or source.lower().startswith("https://")):
#try:
# added spoofed user agent for images that are blocking bots (like wikipedia)
req = urllib2.Request(source, headers={'User-Agent' : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5"})
img_file = urllib2.urlopen(req)
#except:
#if verbose:
#print "Couldn't open Image from URL:" + source
#return None
im = StringIO(img_file.read())
source = pil.open(im).convert("RGB")
#Check if loaded from base64 URI
if isinstance(source, basestring) and (source.lower().startswith("data:image/png;base64,")):
img = source[22:].decode("base64")
im = StringIO(img)
source = pil.open(im).convert("RGB")
#This section loads custom built-in images
if isinstance(source, basestring):
tmpname = source.lower()
if tmpname == "simplecv" or tmpname == "logo":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','simplecv.png')
source = imgpth
elif tmpname == "simplecv_inverted" or tmpname == "inverted" or tmpname == "logo_inverted":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','simplecv_inverted.png')
source = imgpth
elif tmpname == "lenna":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','lenna.png')
source = imgpth
elif tmpname == "lyle":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','LyleJune1973.png')
source = imgpth
elif tmpname == "parity":
choice = random.choice(['LyleJune1973.png','lenna.png'])
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages',choice)
source = imgpth
elif sample:
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages', source)
source = imgpth
if (type(source) == tuple):
w = int(source[0])
h = int(source[1])
source = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3)
cv.Zero(source)
if (type(source) == cv.cvmat):
self._matrix = cv.CreateMat(source.rows, source.cols, cv.CV_8UC3)
if((source.step/source.cols)==3): #this is just a guess
cv.Copy(source, self._matrix, None)
self._colorSpace = ColorSpace.BGR
elif((source.step/source.cols)==1):
cv.Merge(source, source, source, None, self._matrix)
self._colorSpace = ColorSpace.GRAY
else:
self._colorSpace = ColorSpace.UNKNOWN
warnings.warn("Unable to process the provided cvmat")
elif (type(source) == np.ndarray): #handle a numpy array conversion
if (type(source[0, 0]) == np.ndarray): #we have a 3 channel array
#convert to an iplimage bitmap
source = source.astype(np.uint8)
self._numpy = source
if not cv2image:
invertedsource = source[:, :, ::-1].transpose([1, 0, 2])
else:
# If the numpy array is from cv2, then it must not be transposed.
invertedsource = source
#invertedsource = source[:, :, ::-1].transpose([1, 0, 2]) # do not un-comment. breaks cv2 image support
self._bitmap = cv.CreateImageHeader((invertedsource.shape[1], invertedsource.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, invertedsource.tostring(),
invertedsource.dtype.itemsize * 3 * invertedsource.shape[1])
self._colorSpace = ColorSpace.BGR #this is an educated guess
else:
#we have a single channel array, convert to an RGB iplimage
source = source.astype(np.uint8)
if not cv2image:
source = source.transpose([1,0]) #we expect width/height but use col/row
self._bitmap = cv.CreateImage((source.shape[1], source.shape[0]), cv.IPL_DEPTH_8U, 3)
channel = cv.CreateImageHeader((source.shape[1], source.shape[0]), cv.IPL_DEPTH_8U, 1)
#initialize an empty channel bitmap
cv.SetData(channel, source.tostring(),
source.dtype.itemsize * source.shape[1])
cv.Merge(channel, channel, channel, None, self._bitmap)
self._colorSpace = ColorSpace.BGR
elif (type(source) == cv.iplimage):
if (source.nChannels == 1):
self._bitmap = cv.CreateImage(cv.GetSize(source), source.depth, 3)
cv.Merge(source, source, source, None, self._bitmap)
self._colorSpace = ColorSpace.GRAY
else:
self._bitmap = cv.CreateImage(cv.GetSize(source), source.depth, 3)
cv.Copy(source, self._bitmap, None)
self._colorSpace = ColorSpace.BGR
elif (type(source) == type(str()) or source.__class__.__name__ == 'StringIO'):
if source == '':
raise IOError("No filename provided to Image constructor")
elif webp or source.split('.')[-1] == 'webp':
try:
if source.__class__.__name__ == 'StringIO':
source.seek(0) # set the stringIO to the beginning
self._pil = pil.open(source)
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
except:
try:
from webm import decode as webmDecode
except ImportError:
logger.warning('The webm module or latest PIL / PILLOW module needs to be installed to load webp files: https://github.com/sightmachine/python-webm')
return
WEBP_IMAGE_DATA = bytearray(file(source, "rb").read())
result = webmDecode.DecodeRGB(WEBP_IMAGE_DATA)
webpImage = pil.frombuffer(
"RGB", (result.width, result.height), str(result.bitmap),
"raw", "RGB", 0, 1
)
self._pil = webpImage.convert("RGB")
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
self.filename = source
cv.SetData(self._bitmap, self._pil.tostring())
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
else:
self.filename = source
try:
self._bitmap = cv.LoadImage(self.filename, iscolor=cv.CV_LOAD_IMAGE_COLOR)
except:
self._pil = pil.open(self.filename).convert("RGB")
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, self._pil.tostring())
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
#TODO, on IOError fail back to PIL
self._colorSpace = ColorSpace.BGR
elif (type(source) == pg.Surface):
self._pgsurface = source
self._bitmap = cv.CreateImageHeader(self._pgsurface.get_size(), cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, pg.image.tostring(self._pgsurface, "RGB"))
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
self._colorSpace = ColorSpace.BGR
elif (PIL_ENABLED and (
(len(source.__class__.__bases__) and source.__class__.__bases__[0].__name__ == "ImageFile")
or source.__class__.__name__ == "JpegImageFile"
or source.__class__.__name__ == "WebPPImageFile"
or source.__class__.__name__ == "Image")):
if source.mode != 'RGB':
source = source.convert('RGB')
self._pil = source
#from the opencv cookbook
#http://opencv.willowgarage.com/documentation/python/cookbook.html
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, self._pil.tostring())
self._colorSpace = ColorSpace.BGR
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
#self._bitmap = cv.iplimage(self._bitmap)
else:
return None
#if the caller passes in a colorspace we override it
if(colorSpace != ColorSpace.UNKNOWN):
self._colorSpace = colorSpace
bm = self.getBitmap()
self.width = bm.width
self.height = bm.height
self.depth = bm.depth
def __del__(self):
"""
This is called when the instance is about to be destroyed, also known as a destructor.
"""
try :
for i in self._tempFiles:
if (i[1]):
os.remove(i[0])
except :
pass
def getEXIFData(self):
"""
**SUMMARY**
This function extracts the exif data from an image file like JPEG or TIFF. The data is returned as a dict.
**RETURNS**
A dictionary of key value pairs. The value pairs are defined in the EXIF.py file.
**EXAMPLE**
>>> img = Image("./SimpleCV/sampleimages/OWS.jpg")
>>> data = img.getEXIFData()
>>> data['Image GPSInfo'].values
**NOTES**
* Compliments of: http://exif-py.sourceforge.net/
* See also: http://en.wikipedia.org/wiki/Exchangeable_image_file_format
**See Also**
:py:class:`EXIF`
"""
import os, string
if( self.filename is None or len(self.filename) < 5 ):
#not worth warning here; this is common for image sets
#logger.warning("ImageClass.getEXIFData: This image did not come from a file, can't get EXIF data.")
return {}
fileName, fileExtension = os.path.splitext(self.filename)
fileExtension = string.lower(fileExtension)
if( fileExtension != '.jpeg' and fileExtension != '.jpg' and
fileExtension != '.tiff' and fileExtension != '.tif'):
#logger.warning("ImageClass.getEXIFData: This image format does not support EXIF")
return {}
raw = open(self.filename,'rb')
data = process_file(raw)
return data
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
* Left click will show mouse coordinates and color.
* Right click will kill the live image.
**RETURNS**
Nothing. In place method.
**EXAMPLE**
>>> cam = Camera()
>>> cam.live()
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self
i.clearLayers()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
def getColorSpace(self):
"""
**SUMMARY**
Returns the value matched in the color space class
**RETURNS**
Integer corresponding to the color space.
**EXAMPLE**
>>> if(image.getColorSpace() == ColorSpace.RGB)
**SEE ALSO**
:py:class:`ColorSpace`
"""
return self._colorSpace
def isRGB(self):
"""
**SUMMARY**
Returns true if this image uses the RGB colorspace.
**RETURNS**
True if the image uses the RGB colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isRGB() ):
>>> r,g,b = img.splitChannels()
**SEE ALSO**
:py:meth:`toRGB`
"""
return(self._colorSpace==ColorSpace.RGB)
def isBGR(self):
"""
**SUMMARY**
Returns true if this image uses the BGR colorspace.
**RETURNS**
True if the image uses the BGR colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isBGR() ):
>>> b,g,r = img.splitChannels()
**SEE ALSO**
:py:meth:`toBGR`
"""
return(self._colorSpace==ColorSpace.BGR)
def isHSV(self):
"""
**SUMMARY**
Returns true if this image uses the HSV colorspace.
**RETURNS**
True if the image uses the HSV colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHSV() ):
>>> h,s,v = img.splitChannels()
**SEE ALSO**
:py:meth:`toHSV`
"""
return(self._colorSpace==ColorSpace.HSV)
def isHLS(self):
"""
**SUMMARY**
Returns true if this image uses the HLS colorspace.
**RETURNS**
True if the image uses the HLS colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHLS() ):
>>> h,l,s = img.splitChannels()
**SEE ALSO**
:py:meth:`toHLS`
"""
return(self._colorSpace==ColorSpace.HLS)
def isXYZ(self):
"""
**SUMMARY**
Returns true if this image uses the XYZ colorspace.
**RETURNS**
True if the image uses the XYZ colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isXYZ() ):
>>> x,y,z = img.splitChannels()
**SEE ALSO**
:py:meth:`toXYZ`
"""
return(self._colorSpace==ColorSpace.XYZ)
def isGray(self):
"""
**SUMMARY**
Returns true if this image uses the Gray colorspace.
**RETURNS**
True if the image uses the Gray colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isGray() ):
>>> print "The image is in Grayscale."
**SEE ALSO**
:py:meth:`toGray`
"""
return(self._colorSpace==ColorSpace.GRAY)
def isYCrCb(self):
"""
**SUMMARY**
Returns true if this image uses the YCrCb colorspace.
**RETURNS**
True if the image uses the YCrCb colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isYCrCb() ):
>>> Y,Cr,Cb = img.splitChannels()
**SEE ALSO**
:py:meth:`toYCrCb`
"""
return(self._colorSpace==ColorSpace.YCrCb)
def toRGB(self):
"""
**SUMMARY**
This method attempts to convert the image to the RGB colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> RGBImg = img.toRGB()
**SEE ALSO**
:py:meth:`isRGB`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2RGB)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
elif( self._colorSpace == ColorSpace.RGB ):
retVal = self.getBitmap()
else:
logger.warning("Image.toRGB: There is no supported conversion to RGB colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.RGB )
def toBGR(self):
"""
**SUMMARY**
This method attempts to convert the image to the BGR colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> BGRImg = img.toBGR()
**SEE ALSO**
:py:meth:`isBGR`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.RGB or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2BGR)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2BGR)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2BGR)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2BGR)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2BGR)
elif( self._colorSpace == ColorSpace.BGR ):
retVal = self.getBitmap()
else:
logger.warning("Image.toBGR: There is no supported conversion to BGR colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.BGR )
def toHLS(self):
"""
**SUMMARY**
This method attempts to convert the image to the HLS colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HLSImg = img.toHLS()
**SEE ALSO**
:py:meth:`isHLS`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HLS)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HLS ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSL: There is no supported conversion to HSL colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HLS )
def toHSV(self):
"""
**SUMMARY**
This method attempts to convert the image to the HSV colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HSVImg = img.toHSV()
**SEE ALSO**
:py:meth:`isHSV`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HSV)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HSV ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSV: There is no supported conversion to HSV colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HSV )
def toXYZ(self):
"""
**SUMMARY**
This method attempts to convert the image to the XYZ colorspace.
If the color space is unknown, we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> XYZImg = img.toXYZ()
**SEE ALSO**
:py:meth:`isXYZ`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2XYZ)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.XYZ ):
retVal = self.getBitmap()
else:
logger.warning("Image.toXYZ: There is no supported conversion to XYZ colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.XYZ )
def toGray(self):
"""
**SUMMARY**
This method attempts to convert the image to the grayscale colorspace.
If the color space is unknown, we assume it is in the BGR format.
**RETURNS**
A grayscale SimpleCV image if successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.toGray().binarize().show()
**SEE ALSO**
:py:meth:`isGray`
:py:meth:`binarize`
"""
retVal = self.getEmpty(1)
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2GRAY)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.GRAY ):
retVal = self.getBitmap()
else:
logger.warning("Image.toGray: There is no supported conversion to gray colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.GRAY )
def toYCrCb(self):
"""
**SUMMARY**
This method attempts to convert the image to the YCrCb colorspace.
If the color space is unknown, we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> YCrCbImg = img.toYCrCb()
**SEE ALSO**
:py:meth:`isYCrCb`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2YCrCb)
elif( self._colorSpace == ColorSpace.RGB ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.YCrCb ):
retVal = self.getBitmap()
else:
logger.warning("Image.toYCrCb: There is no supported conversion to YCrCb colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.YCrCb )
def getEmpty(self, channels=3):
"""
**SUMMARY**
Create a new, empty OpenCV bitmap with the specified number of channels (default 3).
This method basically creates an empty copy of the image. This is handy for
interfacing with OpenCV functions directly.
**PARAMETERS**
* *channels* - The number of channels in the returned OpenCV image.
**RETURNS**
Returns a black OpenCV IplImage that matches the width, height, and color
depth of the source image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getEmpty()
>>> cv.SomeOpenCVFunc(img.getBitmap(),rawImg)
**SEE ALSO**
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
bitmap = cv.CreateImage(self.size(), cv.IPL_DEPTH_8U, channels)
cv.SetZero(bitmap)
return bitmap
def getBitmap(self):
"""
**SUMMARY**
Retrieve the bitmap (iplImage) of the Image. This is useful if you want
to use functions from OpenCV with SimpleCV's image class
**RETURNS**
Returns the OpenCV IplImage of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getBitmap()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (self._bitmap):
return self._bitmap
elif (self._matrix):
self._bitmap = cv.GetImage(self._matrix)
return self._bitmap
def getMatrix(self):
"""
**SUMMARY**
Get the matrix (cvMat) version of the image, required for some OpenCV algorithms.
**RETURNS**
Returns the OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (self._matrix):
return self._matrix
else:
self._matrix = cv.GetMat(self.getBitmap()) #convert the bitmap to a matrix
return self._matrix
def getFPMatrix(self):
"""
**SUMMARY**
Converts the standard int bitmap to a floating point bitmap.
This is handy for some OpenCV functions.
**RETURNS**
Returns the floating point OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getFPMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
retVal = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_32F, 3)
cv.Convert(self.getBitmap(),retVal)
return retVal
def getPIL(self):
"""
**SUMMARY**
Get a PIL Image object for use with the Python Image Library
This is handy for some PIL functions.
**RETURNS**
Returns the Python Imaging Library (PIL) version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getPIL()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getFPMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (not PIL_ENABLED):
return None
if (not self._pil):
rgbbitmap = self.getEmpty()
cv.CvtColor(self.getBitmap(), rgbbitmap, cv.CV_BGR2RGB)
self._pil = pil.fromstring("RGB", self.size(), rgbbitmap.tostring())
return self._pil
def getGrayNumpy(self):
"""
**SUMMARY**
Return a grayscale Numpy array of the image.
**RETURNS**
Returns the image, converted first to grayscale and then converted to a 2D numpy array.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayNumpy()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if( self._grayNumpy != "" ):
return self._grayNumpy
else:
self._grayNumpy = uint8(np.array(cv.GetMat(self._getGrayscaleBitmap())).transpose())
return self._grayNumpy
def getNumpy(self):
"""
**SUMMARY**
Get a Numpy array of the image in width x height x RGB dimensions
**RETURNS**
Returns the image as a 3D numpy array in width x height x RGB order.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getNumpy()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if self._numpy != "":
return self._numpy
self._numpy = np.array(self.getMatrix())[:, :, ::-1].transpose([1, 0, 2])
return self._numpy
def getNumpyCv2(self):
"""
**SUMMARY**
Get a Numpy array of the image in the memory layout expected by OpenCV >= 2.3 (cv2)
**RETURNS**
Returns the 3D numpy array of the image compatible with OpenCV >= 2.3
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getNumpyCv2()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpyCv2`
"""
if type(self._cv2Numpy) is not np.ndarray:
self._cv2Numpy = np.array(self.getMatrix())
return self._cv2Numpy
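# A hedged interop sketch (not from the original docstrings): the array returned
# by getNumpyCv2() is laid out the way the cv2 module expects, so it can be passed
# straight to cv2 functions and the result wrapped back into an Image with
# cv2image=True. The kernel size below is just an illustrative choice.
#   >>> img = Image("lenna")
#   >>> arr = img.getNumpyCv2()
#   >>> blurred = cv2.GaussianBlur(arr, (5, 5), 0)
#   >>> Image(blurred, cv2image=True).show()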
def getGrayNumpyCv2(self):
"""
**SUMMARY**
Get a grayscale Numpy array of the image, in the layout expected by OpenCV >= 2.3 (cv2)
**RETURNS**
Returns the grayscale numpy array compatible with OpenCV >= 2.3
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayNumpyCv2()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpyCv2`
"""
if type(self._cv2GrayNumpy) is not np.ndarray:
self._cv2GrayNumpy = np.array(self.getGrayscaleMatrix())
return self._cv2GrayNumpy
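# _getGrayscaleBitmap (below) lazily builds and caches a single-channel grayscale
# IplImage of this image, converting from the current colorspace (via RGB where
# needed). It backs getGrayscaleMatrix(), getGrayNumpy(), and the equalization helper.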
def _getGrayscaleBitmap(self):
if (self._graybitmap):
return self._graybitmap
self._graybitmap = self.getEmpty(1)
temp = self.getEmpty(3)
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), self._graybitmap, cv.CV_BGR2GRAY)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_HLS2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_HSV2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_XYZ2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.GRAY):
cv.Split(self.getBitmap(), self._graybitmap, self._graybitmap, self._graybitmap, None)
else:
logger.warning("Image._getGrayscaleBitmap: There is no supported conversion to gray colorspace")
return None
return self._graybitmap
def getGrayscaleMatrix(self):
"""
**SUMMARY**
Get the grayscale matrix (cvMat) version of the image, required for some OpenCV algorithms.
**RETURNS**
Returns the OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayscaleMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getMatrix`
"""
if (self._grayMatrix):
return self._grayMatrix
else:
self._grayMatrix = cv.GetMat(self._getGrayscaleBitmap()) #convert the bitmap to a matrix
return self._grayMatrix
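# _getEqualizedGrayscaleBitmap (below) lazily builds and caches a histogram-equalized
# grayscale bitmap; it is used by equalize() and by findHaarFeatures() when the cv2
# code path is unavailable.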
def _getEqualizedGrayscaleBitmap(self):
if (self._equalizedgraybitmap):
return self._equalizedgraybitmap
self._equalizedgraybitmap = self.getEmpty(1)
cv.EqualizeHist(self._getGrayscaleBitmap(), self._equalizedgraybitmap)
return self._equalizedgraybitmap
def equalize(self):
"""
**SUMMARY**
Perform a histogram equalization on the image.
**RETURNS**
Returns a grayscale SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img = img.equalize()
"""
return Image(self._getEqualizedGrayscaleBitmap())
def getPGSurface(self):
"""
**SUMMARY**
Returns the image as a pygame surface. This is used for rendering the display
**RETURNS**
A pygame surface object used for rendering.
"""
if (self._pgsurface):
return self._pgsurface
else:
if self.isGray():
self._pgsurface = pg.image.fromstring(self.getBitmap().tostring(), self.size(), "RGB")
else:
self._pgsurface = pg.image.fromstring(self.toRGB().getBitmap().tostring(), self.size(), "RGB")
return self._pgsurface
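# A hedged usage sketch for getPGSurface() (not from the original docstring; assumes
# pygame is available as pg and a display window is wanted for rendering):
#   >>> img = Image("lenna")
#   >>> screen = pg.display.set_mode(img.size())
#   >>> screen.blit(img.getPGSurface(), (0, 0))
#   >>> pg.display.flip()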
def toString(self):
"""
**SUMMARY**
Returns the image as a string, useful for moving data around.
**RETURNS**
The image, converted to RGB, then converted to a string.
"""
return self.toRGB().getBitmap().tostring()
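# A hedged usage sketch for toString() (mirrors the pil.fromstring call used in
# getPIL() above; assumes PIL_ENABLED):
#   >>> img = Image("lenna")
#   >>> raw = img.toString()
#   >>> pilimg = pil.fromstring("RGB", img.size(), raw)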
def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, path=None, filename=None, cleanTemp=False ,**params):
"""
**SUMMARY**
Save the image to the specified filename. If no filename is provided then
it will use the filename the Image was loaded from or the last
place it was saved to. You can save to lots of places, not just files.
For example you can save to the Display, a JpegStream, VideoStream,
temporary file, or IPython Notebook.
Save will implicitly render the image's layers before saving, but the layers are
not applied to the Image itself.
**PARAMETERS**
* *filehandle_or_filename* - the filename to which to store the file. The method will infer the file type.
* *mode* - This flag is used for saving using PIL.
* *verbose* - If this flag is true we return the path where we saved the file.
* *temp* - If temp is True we save the image as a temporary file and return the path.
* *path* - The path where temporary files need to be stored.
* *filename* - The name (prefix) of the temporary file.
* *cleanTemp* - Set this flag to True if temporary files are to be deleted when the object is destroyed.
* *params* - This object is used for overloading the PIL save methods. In particular
this method is useful for setting the jpeg compression level. For JPG see this documentation:
http://www.pythonware.com/library/pil/handbook/format-jpeg.htm
**EXAMPLES**
To save as a temporary file just use:
>>> img = Image('simplecv')
>>> img.save(temp=True)
It will return the path that it saved to.
Save also supports IPython Notebooks when passing it a Display object
that has been instantiated with the notebook flag.
To do this just use::
>>> disp = Display(displaytype='notebook')
>>> img.save(disp)
.. Note::
You must have IPython Notebook installed for this to work. The path and filename parameters are valid if and only if temp is set to True.
.. attention::
We need examples for all save methods as they are unintuitive.
"""
#TODO, we use the term mode here when we mean format
#TODO, if any params are passed, use PIL
if temp :
import glob
if filename == None :
filename = 'Image'
if path == None :
path=tempfile.gettempdir()
if glob.os.path.exists(path):
path = glob.os.path.abspath(path)
imagefiles = glob.glob(glob.os.path.join(path,filename+"*.png"))
num = [0]
for img in imagefiles :
num.append(int(glob.re.findall('[0-9]+$',img[:-4])[-1]))
num.sort()
fnum = num[-1]+1
filename = glob.os.path.join(path,filename+("%07d" % fnum)+".png")
self._tempFiles.append((filename,cleanTemp))
self.save(self._tempFiles[-1][0])
return self._tempFiles[-1][0]
else :
print "Path does not exist!"
else :
if (filename) :
filehandle_or_filename = filename + ".png"
if (not filehandle_or_filename):
if (self.filename):
filehandle_or_filename = self.filename
else:
filehandle_or_filename = self.filehandle
if (len(self._mLayers)):
saveimg = self.applyLayers()
else:
saveimg = self
if self._colorSpace != ColorSpace.BGR and self._colorSpace != ColorSpace.GRAY:
saveimg = saveimg.toBGR()
if not isinstance(filehandle_or_filename, basestring):
fh = filehandle_or_filename
if (not PIL_ENABLED):
logger.warning("You need the python image library to save by filehandle")
return 0
if (type(fh) == InstanceType and fh.__class__.__name__ == "JpegStreamer"):
fh.jpgdata = StringIO()
saveimg.getPIL().save(fh.jpgdata, "jpeg", **params) #save via PIL to a StringIO handle
fh.refreshtime = time.time()
self.filename = ""
self.filehandle = fh
elif (type(fh) == InstanceType and fh.__class__.__name__ == "VideoStream"):
self.filename = ""
self.filehandle = fh
fh.writeFrame(saveimg)
elif (type(fh) == InstanceType and fh.__class__.__name__ == "Display"):
if fh.displaytype == 'notebook':
try:
from IPython.core.display import Image as IPImage
except ImportError:
print "You need IPython Notebooks to use this display mode"
return
from IPython.core import display as Idisplay
tf = tempfile.NamedTemporaryFile(suffix=".png")
loc = tf.name
tf.close()
self.save(loc)
Idisplay.display(IPImage(filename=loc))
return
else:
#self.filename = ""
self.filehandle = fh
fh.writeFrame(saveimg)
else:
if (not mode):
mode = "jpeg"
try:
saveimg.getPIL().save(fh, mode, **params) # The latest version of PIL / PILLOW supports webp, try this first, if not gracefully fallback
self.filehandle = fh #set the filename for future save operations
self.filename = ""
return 1
except Exception, e:
if mode.lower() != 'webp':
raise e
if verbose:
print self.filename
if not mode.lower() == 'webp':
return 1
#make a temporary file location if there isn't one
if not filehandle_or_filename:
filename = tempfile.mkstemp(suffix=".png")[-1]
else:
filename = filehandle_or_filename
#allow saving in webp format
if mode == 'webp' or re.search('\.webp$', filename):
try:
#newer versions of PIL support webp format, try that first
self.getPIL().save(filename, **params)
except:
#if PIL doesn't support it, maybe we have the python-webm library
try:
from webm import encode as webmEncode
from webm.handlers import BitmapHandler, WebPHandler
except:
logger.warning('You need the webm library to save to webp format. You can download from: https://github.com/sightmachine/python-webm')
return 0
#PNG_BITMAP_DATA = bytearray(Image.open(PNG_IMAGE_FILE).tostring())
PNG_BITMAP_DATA = bytearray(self.toString())
IMAGE_WIDTH = self.width
IMAGE_HEIGHT = self.height
image = BitmapHandler(
PNG_BITMAP_DATA, BitmapHandler.RGB,
IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_WIDTH * 3
)
result = webmEncode.EncodeRGB(image)
if filehandle_or_filename.__class__.__name__ == 'StringIO':
filehandle_or_filename.write(result.data)
else:
file(filename.format("RGB"), "wb").write(result.data)
return 1
#if the user is passing kwargs use the PIL save method.
if( params ): #usually this is just the compression rate for the image
if (not mode):
mode = "jpeg"
saveimg.getPIL().save(filename, mode, **params)
return 1
if (filename):
cv.SaveImage(filename, saveimg.getBitmap())
self.filename = filename #set the filename for future save operations
self.filehandle = ""
elif (self.filename):
cv.SaveImage(self.filename, saveimg.getBitmap())
else:
return 0
if verbose:
print self.filename
if temp:
return filename
else:
return 1
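# A hedged example of the **params passthrough described in save()'s docstring:
# keyword arguments are forwarded to PIL's save(), so the JPEG quality can be set
# directly. The output path is only illustrative.
#   >>> img = Image("lenna")
#   >>> img.save("/tmp/lenna_hq.jpg", quality=95)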
def copy(self):
"""
**SUMMARY**
Return a full copy of the Image's bitmap. Note that this is different
from using python's implicit copy function in that only the bitmap itself
is copied. This method essentially performs a deep copy.
**RETURNS**
A copy of this SimpleCV image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.copy()
"""
newimg = self.getEmpty()
cv.Copy(self.getBitmap(), newimg)
return Image(newimg, colorSpace=self._colorSpace)
def upload(self,dest,api_key=None,api_secret=None, verbose = True):
"""
**SUMMARY**
Uploads the image to imgur, flickr, or dropbox. In verbose mode URL values are printed.
**PARAMETERS**
* *api_key* - a string of the API key.
* *api_secret* (required only for flickr and dropbox ) - a string of the API secret.
* *verbose* - If verbose is true all values are printed to the screen
**RETURNS**
If uploading is successful:
- Imgur returns the original image URL on success and None if it fails.
- Flickr returns True on success, else returns False.
- Dropbox returns True on success.
**EXAMPLE**
To upload an image to imgur::
>>> img = Image("lenna")
>>> result = img.upload( 'imgur',"MY_API_KEY1234567890" )
>>> print "Uploaded To: " + result[0]
To upload image to flickr::
>>> img.upload('flickr','api_key','api_secret')
>>> img.invert().upload('flickr') #Once the api keys and secret keys are cached.
To upload image to dropbox::
>>> img.upload('dropbox','api_key','api_secret')
>>> img.invert().upload('dropbox') #Once the api keys and secret keys are cached.
**NOTES**
.. Warning::
This method requires an extra package to be installed, depending on the destination:
- PyCurl (imgur)
- flickrapi (flickr)
- dropbox (dropbox)
.. Warning::
You must supply your own API key. See here:
- http://imgur.com/register/api_anon
- http://www.flickr.com/services/api/misc.api_keys.html
- https://www.dropbox.com/developers/start/setup#python
"""
if ( dest=='imgur' ) :
try:
import pycurl
except ImportError:
print "PycURL Library not installed."
return
response = StringIO()
c = pycurl.Curl()
values = [("key", api_key),
("image", (c.FORM_FILE, self.filename))]
c.setopt(c.URL, "http://api.imgur.com/2/upload.xml")
c.setopt(c.HTTPPOST, values)
c.setopt(c.WRITEFUNCTION, response.write)
c.perform()
c.close()
match = re.search(r'<hash>(\w+).*?<deletehash>(\w+).*?<original>(http://[\w.]+/[\w.]+)', response.getvalue() , re.DOTALL)
if match:
if(verbose):
print "Imgur page: http://imgur.com/" + match.group(1)
print "Original image: " + match.group(3)
print "Delete page: http://imgur.com/delete/" + match.group(2)
return [match.group(1),match.group(3),match.group(2)]
else :
if(verbose):
print "The API Key given is not valid"
return None
elif (dest=='flickr'):
global temp_token
flickr = None
try :
import flickrapi
except ImportError:
print "Flickr API is not installed. Please install it from http://pypi.python.org/pypi/flickrapi"
return False
try :
if (not(api_key==None and api_secret==None)):
self.flickr = flickrapi.FlickrAPI(api_key,api_secret,cache=True)
self.flickr.cache = flickrapi.SimpleCache(timeout=3600, max_entries=200)
self.flickr.authenticate_console('write')
temp_token = (api_key,api_secret)
else :
try :
self.flickr = flickrapi.FlickrAPI(temp_token[0],temp_token[1],cache=True)
self.flickr.authenticate_console('write')
except NameError :
print "API key and Secret key are not set."
return
except :
print "The API Key and Secret Key are not valid"
return False
if (self.filename) :
try :
self.flickr.upload(self.filename,self.filehandle)
except :
print "Uploading Failed !"
return False
else :
tf = self.save(temp=True)
self.flickr.upload(tf,"Image")
return True
elif (dest=='dropbox'):
global dropbox_token
access_type = 'dropbox'
try :
from dropbox import client, rest, session
import webbrowser
except ImportError:
print "Dropbox API is not installed. For more info refer : https://www.dropbox.com/developers/start/setup#python "
return False
try :
if ( 'dropbox_token' not in globals() and api_key!=None and api_secret!=None ):
sess = session.DropboxSession(api_key, api_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
access_token = sess.obtain_access_token(request_token)
dropbox_token = client.DropboxClient(sess)
else :
if (dropbox_token) :
pass
else :
return None
except :
print "The API Key and Secret Key are not valid"
return False
if (self.filename) :
try :
f = open(self.filename)
dropbox_token.put_file('/SimpleCVImages/'+os.path.split(self.filename)[-1], f)
except :
print "Uploading Failed !"
return False
else :
tf = self.save(temp=True)
f = open(tf)
dropbox_token.put_file('/SimpleCVImages/'+'Image', f)
return True
def scale(self, width, height = -1, interpolation=cv2.INTER_LINEAR):
"""
**SUMMARY**
Scale the image to a new width and height.
If no height is provided, the width is considered a scaling value.
**PARAMETERS**
* *width* - either the new width in pixels, if the height parameter is > 0, or if this value
is a floating point value, this is the scaling factor.
* *height* - the new height in pixels.
* *interpolation* - how to generate new pixels that don't match the original pixels. This argument is passed directly to cv2.resize. See http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#cv2.resize for more details
**RETURNS**
The resized image.
**EXAMPLE**
>>> img.scale(200, 100) #scales the image to 200px x 100px
>>> img.scale(2.0) #enlarges the image to 2x its current size
.. Warning::
The two value scale command is deprecated. To set width and height
use the resize function.
:py:meth:`resize`
"""
w, h = width, height
if height == -1:
w = int(self.width * width)
h = int(self.height * width)
if( w > MAX_DIMENSION or h > MAX_DIMENSION or h < 1 or w < 1 ):
logger.warning("Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return self
scaledArray = np.zeros((w,h,3),dtype='uint8')
retVal = cv2.resize(self.getNumpyCv2(), (w,h), interpolation = interpolation)
return Image(retVal, colorSpace=self._colorSpace,cv2image = True)
def resize(self, w=None,h=None):
"""
**SUMMARY**
This method resizes an image based on a width, a height, or both.
If either width or height is not provided the value is inferred by keeping the aspect ratio.
If both values are provided then the image is resized accordingly.
**PARAMETERS**
* *w* - The width of the output image in pixels.
* *h* - The height of the output image in pixels.
**RETURNS**
Returns a resized image, if the size is invalid a warning is issued and
None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.resize(w=1024) # h is guessed from w
>>> img3 = img.resize(h=1024) # w is guessed from h
>>> img4 = img.resize(w=200,h=100)
"""
retVal = None
if( w is None and h is None ):
logger.warning("Image.resize has no parameters. No operation is performed")
return None
elif( w is not None and h is None):
sfactor = float(w)/float(self.width)
h = int( sfactor*float(self.height) )
elif( w is None and h is not None):
sfactor = float(h)/float(self.height)
w = int( sfactor*float(self.width) )
if( w > MAX_DIMENSION or h > MAX_DIMENSION ):
logger.warning("Image.resize Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return retVal
scaled_bitmap = cv.CreateImage((w, h), 8, 3)
cv.Resize(self.getBitmap(), scaled_bitmap)
return Image(scaled_bitmap, colorSpace=self._colorSpace)
def smooth(self, algorithm_name='gaussian', aperture=(3,3), sigma=0, spatial_sigma=0, grayscale=False, aperature=None):
"""
**SUMMARY**
Smooth the image, by default with the Gaussian blur. If desired,
additional algorithms and apertures can be specified. Optional parameters
are passed directly to OpenCV's cv.Smooth() function.
If grayscale is true the smoothing operation is only performed on a single channel
otherwise the operation is performed on each channel of the image.
For OpenCV versions >= 2.3.0 it is advisable to take a look at
- :py:meth:`bilateralFilter`
- :py:meth:`medianFilter`
- :py:meth:`blur`
- :py:meth:`gaussianBlur`
**PARAMETERS**
* *algorithm_name* - valid options are 'blur', 'gaussian', 'bilateral', and 'median'.
* `Median Filter <http://en.wikipedia.org/wiki/Median_filter>`_
* `Gaussian Blur <http://en.wikipedia.org/wiki/Gaussian_blur>`_
* `Bilateral Filter <http://en.wikipedia.org/wiki/Bilateral_filter>`_
* *aperture* - A tuple for the aperture of the gaussian blur as an (x,y) tuple.
- Note there were rampant spelling mistakes in both smooth & sobel;
aperture is spelled as such, not "aperature". This code is backwards
compatible with the old spelling.
.. Warning::
These must be odd numbers.
* *sigma* - The standard deviation for the gaussian blur (for the bilateral filter this is the color sigma). Passed through to cv.Smooth.
* *spatial_sigma* - The spatial sigma used by the bilateral filter. Passed through to cv.Smooth.
* *grayscale* - Return just the grayscale image.
**RETURNS**
The smoothed image.
**EXAMPLE**
>>> img = Image("Lenna")
>>> img2 = img.smooth()
>>> img3 = img.smooth('median')
**SEE ALSO**
:py:meth:`bilateralFilter`
:py:meth:`medianFilter`
:py:meth:`blur`
"""
# see comment on argument documentation (spelling error)
aperture = aperature if aperature else aperture
if is_tuple(aperture):
win_x, win_y = aperture
if win_x <= 0 or win_y <= 0 or win_x % 2 == 0 or win_y % 2 == 0:
logger.warning("The aperture (x,y) must be odd number and greater than 0.")
return None
else:
raise ValueError("Please provide a tuple to aperture, got: %s" % type(aperture))
#gauss and blur can work in-place, others need a buffer frame
#use a string to ID rather than the openCV constant
if algorithm_name == "blur":
algorithm = cv.CV_BLUR
elif algorithm_name == "bilateral":
algorithm = cv.CV_BILATERAL
win_y = win_x #aperture must be square
elif algorithm_name == "median":
algorithm = cv.CV_MEDIAN
win_y = win_x #aperture must be square
else:
algorithm = cv.CV_GAUSSIAN #default algorithm is gaussian
if grayscale:
newimg = self.getEmpty(1)
cv.Smooth(self._getGrayscaleBitmap(), newimg, algorithm, win_x, win_y, sigma, spatial_sigma)
else:
newimg = self.getEmpty(3)
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
ro = self.getEmpty(1)
go = self.getEmpty(1)
bo = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
cv.Smooth(r, ro, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Smooth(g, go, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Smooth(b, bo, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Merge(bo,go,ro, None, newimg)
return Image(newimg, colorSpace=self._colorSpace)
def medianFilter(self, window='',grayscale=False):
"""
**SUMMARY**
Smooths the image with the median filter. Performs a median filtering operation to denoise/despeckle the image.
The optional parameter is the window size.
see : http://en.wikipedia.org/wiki/Median_filter
**Parameters**
* *window* - should be a tuple (win_x,win_y), where win_x should be equal to win_y. By default it is set to 3x3, i.e. window = (3,3).
**Note**
win_x and win_y should be greater than zero, odd, and equal.
For OpenCV versions <= 2.3.0
this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth.
For OpenCV versions >= 2.3.0
cv2.medianBlur function is called.
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
if win_x != win_y :
win_x=win_y
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif( is_number(window) ):
win_x = window
else :
win_x = 3 #set the default aperture window size (3x3)
if ( not new_version ) :
grayscale_ = grayscale
return self.smooth(algorithm_name='median', aperture=(win_x,win_y),grayscale=grayscale_)
else :
if (grayscale) :
img_medianBlur = cv2.medianBlur(self.getGrayNumpy(),win_x)
return Image(img_medianBlur, colorSpace=ColorSpace.GRAY)
else :
img_medianBlur = cv2.medianBlur(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),win_x)
img_medianBlur = img_medianBlur[:,:, ::-1].transpose([1,0,2])
return Image(img_medianBlur, colorSpace=self._colorSpace)
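# A hedged usage sketch for medianFilter() (the docstring above has no example;
# the window sizes are assumptions, any odd value works):
#   >>> img = Image("lenna")
#   >>> despeckled = img.medianFilter(5)                       # 5x5 median filter
#   >>> despeckledGray = img.medianFilter((7,7), grayscale=True)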
def bilateralFilter(self, diameter=5,sigmaColor=10, sigmaSpace=10,grayscale=False):
"""
**SUMMARY**
Smooths the image using bilateral filtering, which is useful for removing texture while preserving edges.
The optional parameters are diameter, sigmaColor, and sigmaSpace.
Bilateral Filter
see : http://en.wikipedia.org/wiki/Bilateral_filter
see : http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
**Parameters**
* *diameter* - A tuple for the window of the form (diameter,diameter). By default window = (3,3). (for OpenCV versions <= 2.3.0)
- Diameter of each pixel neighborhood that is used during filtering. ( for OpenCV versions >= 2.3.0)
* *sigmaColor* - Filter the specified value in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.
* *sigmaSpace* - Filter the specified value in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth.
-- where aperture(window) is (diameter,diameter)
-- sigmaColor and sigmaSpace become obsolete
For OpenCV versions >= 2.3.0
-- the cv2.bilateralFilter function is called
-- If the sigmaColor and sigmaSpace values are small (< 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very strong effect, making the image look 'cartoonish'
-- It is recommended to use diameter=5 for real-time applications, and perhaps diameter=9 for offline applications that need heavy noise filtering.
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(diameter):
win_x, win_y = diameter
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
if win_x != win_y :
diameter = (win_x, win_y)
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif( is_number(diameter) ):
pass
else :
win_x = 3 #set the default aperture window size (3x3)
diameter = (win_x,win_x)
if ( not new_version ) :
grayscale_ = grayscale
if( is_number(diameter) ) :
diameter = (diameter,diameter)
return self.smooth(algorithm_name='bilateral', aperture=diameter,grayscale=grayscale_)
else :
if (grayscale) :
img_bilateral = cv2.bilateralFilter(self.getGrayNumpy(),diameter,sigmaColor, sigmaSpace)
return Image(img_bilateral, colorSpace=ColorSpace.GRAY)
else :
img_bilateral = cv2.bilateralFilter(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),diameter,sigmaColor, sigmaSpace)
img_bilateral = img_bilateral[:,:, ::-1].transpose([1,0,2])
return Image(img_bilateral,colorSpace=self._colorSpace)
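# A hedged usage sketch for bilateralFilter() (no example in the docstring above;
# the parameter values follow the recommendations given there):
#   >>> img = Image("lenna")
#   >>> smoothRT = img.bilateralFilter(diameter=5)             # real-time friendly
#   >>> smoothHQ = img.bilateralFilter(diameter=9, sigmaColor=75, sigmaSpace=75)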
def blur(self, window = '', grayscale=False):
"""
**SUMMARY**
Smooths an image using the normalized box filter.
The optional parameter is window.
see : http://en.wikipedia.org/wiki/Blur
**Parameters**
* *window* - should be a tuple (win_x,win_y).
- By default it is set to 3x3, i.e. window = (3,3).
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth
For OpenCV versions >= 2.3.0
-- the cv2.blur function is called
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x<=0 or win_y<=0 ) :
logger.warning("win_x and win_y should be greater than 0.")
return None
elif( is_number(window) ):
window = (window,window)
else :
window = (3,3)
if ( not new_version ) :
grayscale_ = grayscale
return self.smooth(algorithm_name='blur', aperture=window, grayscale=grayscale_)
else :
if grayscale:
img_blur = cv2.blur(self.getGrayNumpy(),window)
return Image(img_blur,colorSpace=ColorSpace.GRAY)
else :
img_blur = cv2.blur(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),window)
img_blur = img_blur[:,:, ::-1].transpose([1,0,2])
return Image(img_blur,colorSpace=self._colorSpace)
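# A hedged usage sketch for blur() (no example in the docstring above; the window
# values are assumptions):
#   >>> img = Image("lenna")
#   >>> boxed = img.blur((5,5))
#   >>> boxedGray = img.blur(3, grayscale=True)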
def gaussianBlur(self, window = '', sigmaX=0 , sigmaY=0 ,grayscale=False):
"""
**SUMMARY**
Smooths an image with a Gaussian kernel, typically used to reduce image noise and detail.
The optional parameter is window.
see : http://en.wikipedia.org/wiki/Gaussian_blur
**Parameters**
* *window* - should be a tuple (win_x,win_y), where win_x and win_y should be positive and odd.
- By default it is set to 3x3, i.e. window = (3,3).
* *sigmaX* - Gaussian kernel standard deviation in X direction.
* *sigmaY* - Gaussian kernel standard deviation in Y direction.
* *grayscale* - If true, the effect is applied on grayscale images.
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth
For OpenCV versions >= 2.3.0
-- the cv2.GaussianBlur function is called
"""
try:
import cv2
ver = cv2.__version__
new_version = False
#For OpenCV versions up to 2.4.0, cv2.__version__ is of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:'):
if int(ver.replace('.','0'))>=20300 :
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
pass
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif (is_number(window)):
window = (window, window)
else:
window = (3,3) #set the default aperture window size (3x3)
if (not new_version):
grayscale_ = grayscale
return self.smooth(algorithm_name='gaussian', aperture=window, grayscale=grayscale_)
else:
image_gauss = cv2.GaussianBlur(self.getNumpyCv2(), window, sigmaX, sigmaY=sigmaY)
if grayscale:
return Image(image_gauss, colorSpace=ColorSpace.GRAY, cv2image=True)
else:
return Image(image_gauss, colorSpace=self._colorSpace, cv2image=True)
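# A hedged usage sketch for gaussianBlur() (no example in the docstring above;
# the kernel size and sigma values are assumptions):
#   >>> img = Image("lenna")
#   >>> soft = img.gaussianBlur((5,5), sigmaX=1.5)
#   >>> softGray = img.gaussianBlur((3,3), grayscale=True)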
def invert(self):
"""
**SUMMARY**
Invert (negative) the image. Note that this can also be done with the
unary minus (-) operator. For a binary image this turns black into white and white into black (i.e. white is the new black).
**RETURNS**
The opposite of the current image.
**EXAMPLE**
>>> img = Image("polar_bear_in_the_snow.png")
>>> img.invert().save("black_bear_at_night.png")
**SEE ALSO**
:py:meth:`binarize`
"""
return -self
def grayscale(self):
"""
**SUMMARY**
This method returns a gray scale version of the image. It makes everything look like an old movie.
**RETURNS**
A grayscale SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.grayscale().binarize().show()
**SEE ALSO**
:py:meth:`binarize`
"""
return Image(self._getGrayscaleBitmap(), colorSpace = ColorSpace.GRAY)
def flipHorizontal(self):
"""
**SUMMARY**
Horizontally mirror an image.
.. Warning::
Note that flip does not mean rotate 180 degrees! The two are different.
**RETURNS**
The flipped SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> upsidedown = img.flipHorizontal()
**SEE ALSO**
:py:meth:`flipVertical`
:py:meth:`rotate`
"""
newimg = self.getEmpty()
cv.Flip(self.getBitmap(), newimg, 1)
return Image(newimg, colorSpace=self._colorSpace)
def flipVertical(self):
"""
**SUMMARY**
Vertically mirror an image.
.. Warning::
Note that flip does not mean rotate 180 degrees! The two are different.
**RETURNS**
The flipped SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> upsidedown = img.flipVertical()
**SEE ALSO**
:py:meth:`rotate`
:py:meth:`flipHorizontal`
"""
newimg = self.getEmpty()
cv.Flip(self.getBitmap(), newimg, 0)
return Image(newimg, colorSpace=self._colorSpace)
def stretch(self, thresh_low = 0, thresh_high = 255):
"""
**SUMMARY**
The stretch filter works on a greyscale image; if the image
is color, it returns a greyscale image. The filter works by
taking in a lower and upper threshold. Anything below the lower
threshold is pushed to black (0) and anything above the upper
threshold is pushed to white (255)
**PARAMETERS**
* *thresh_low* - The lower threshold for the stretch operation.
This should be a value between 0 and 255.
* *thresh_high* - The upper threshold for the stretch operation.
This should be a value between 0 and 255.
**RETURNS**
A gray scale version of the image with the appropriate histogram stretching.
**EXAMPLE**
>>> img = Image("orson_welles.jpg")
>>> img2 = img.stretch(56, 200)
>>> img2.show()
**NOTES**
TODO - make this work on RGB images with thresholds for each channel.
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`equalize`
"""
try:
newimg = self.getEmpty(1)
cv.Threshold(self._getGrayscaleBitmap(), newimg, thresh_low, 255, cv.CV_THRESH_TOZERO)
cv.Not(newimg, newimg)
cv.Threshold(newimg, newimg, 255 - thresh_high, 255, cv.CV_THRESH_TOZERO)
cv.Not(newimg, newimg)
return Image(newimg)
except:
return None
def gammaCorrect(self, gamma = 1):
"""
**DESCRIPTION**
Transforms an image according to Gamma Correction also known as
Power Law Transform.
**PARAMETERS**
* *gamma* - A non-negative real number.
**RETURNS**
A Gamma corrected image.
**EXAMPLE**
>>> img = Image('SimpleCV/sampleimages/family_watching_television_1958.jpg')
>>> img.show()
>>> img.gammaCorrect(1.5).show()
>>> img.gammaCorrect(0.7).show()
"""
if gamma < 0:
return "Gamma should be a non-negative real number"
scale = 255.0
src = self.getNumpy()
dst = (((1.0/scale)*src)**gamma)*scale
return Image(dst)
def binarize(self, thresh = -1, maxv = 255, blocksize = 0, p = 5):
"""
**SUMMARY**
Do a binary threshold of the image, changing all values below thresh to maxv
and all above to black. If a color tuple is provided, each color channel
is thresholded separately.
If threshold is -1 (default), an adaptive method (Otsu's method) is used.
If a blocksize is specified, an adaptive threshold is applied instead: for each
blocksize x blocksize region the threshold is the local mean minus p.
**PARAMETERS**
* *thresh* - the threshold as an integer or an (r,g,b) tuple, where pixels below (darker than) thresh are set to the max value,
and all values above this value are set to black. If this parameter is -1 we use Otsu's method.
* *maxv* - The maximum value for pixels below the threshold. Ordinarily this should be 255 (white)
* *blocksize* - the size of the block used in the adaptive binarize operation.
.. Warning::
This parameter must be an odd number.
* *p* - The offset subtracted from the local mean when the adaptive (blocksize) method is used.
**RETURNS**
A binary (two colors, usually black and white) SimpleCV image. This works great for the findBlobs
family of functions.
**EXAMPLE**
Example of a vanilla threshold versus an adaptive threshold:
>>> img = Image("orson_welles.jpg")
>>> b1 = img.binarize(128)
>>> b2 = img.binarize(blocksize=11,p=7)
>>> b3 = b1.sideBySide(b2)
>>> b3.show()
**NOTES**
`Otsu's Method Description <http://en.wikipedia.org/wiki/Otsu's_method>`_
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`findBlobs`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
"""
if is_tuple(thresh):
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
cv.Threshold(r, r, thresh[0], maxv, cv.CV_THRESH_BINARY_INV)
cv.Threshold(g, g, thresh[1], maxv, cv.CV_THRESH_BINARY_INV)
cv.Threshold(b, b, thresh[2], maxv, cv.CV_THRESH_BINARY_INV)
cv.Add(r, g, r)
cv.Add(r, b, r)
return Image(r, colorSpace=self._colorSpace)
elif thresh == -1:
newbitmap = self.getEmpty(1)
if blocksize:
cv.AdaptiveThreshold(self._getGrayscaleBitmap(), newbitmap, maxv,
cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv.CV_THRESH_BINARY_INV, blocksize, p)
else:
cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV + cv.CV_THRESH_OTSU)
return Image(newbitmap, colorSpace=self._colorSpace)
else:
newbitmap = self.getEmpty(1)
#desaturate the image, and apply the new threshold
cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV)
return Image(newbitmap, colorSpace=self._colorSpace)
def meanColor(self, colorSpace = None):
"""
**SUMMARY**
This method finds the average color of all the pixels in the image and returns a tuple in the colorspace specified by the user.
If no colorspace is specified, the (B,G,R) colorspace is taken as the default.
**RETURNS**
A tuple of the average image values. Tuples are in the channel order. *For most images this means the results are (B,G,R).*
**EXAMPLE**
>>> img = Image('lenna')
>>> colors = img.meanColor() # returns tuple in Image's colorspace format.
>>> colors = img.meanColor('BGR') # returns tuple in (B,G,R) format.
>>> colors = img.meanColor('RGB') # returns tuple in (R,G,B) format.
>>> colors = img.meanColor('HSV') # returns tuple in (H,S,V) format.
>>> colors = img.meanColor('XYZ') # returns tuple in (X,Y,Z) format.
>>> colors = img.meanColor('Gray') # returns float of mean intensity.
>>> colors = img.meanColor('YCrCb') # returns tuple in (Y,Cr,Cb) format.
>>> colors = img.meanColor('HLS') # returns tuple in (H,L,S) format.
"""
if colorSpace == None:
return tuple(cv.Avg(self.getBitmap())[0:3])
elif colorSpace == 'BGR':
return tuple(cv.Avg(self.toBGR().getBitmap())[0:3])
elif colorSpace == 'RGB':
return tuple(cv.Avg(self.toRGB().getBitmap())[0:3])
elif colorSpace == 'HSV':
return tuple(cv.Avg(self.toHSV().getBitmap())[0:3])
elif colorSpace == 'XYZ':
return tuple(cv.Avg(self.toXYZ().getBitmap())[0:3])
elif colorSpace == 'Gray':
return (cv.Avg(self._getGrayscaleBitmap())[0])
elif colorSpace == 'YCrCb':
return tuple(cv.Avg(self.toYCrCb().getBitmap())[0:3])
elif colorSpace == 'HLS':
return tuple(cv.Avg(self.toHLS().getBitmap())[0:3])
else:
logger.warning("Image.meanColor: There is no supported conversion to the specified colorspace. Use one of these as argument: 'BGR' , 'RGB' , 'HSV' , 'Gray' , 'XYZ' , 'YCrCb' , 'HLS' .")
return None
def findCorners(self, maxnum = 50, minquality = 0.04, mindistance = 1.0):
"""
**SUMMARY**
This will find corner Feature objects and return them as a FeatureSet
strongest corners first. The parameters give the number of corners to look
for, the minimum quality of the corner feature, and the minimum distance
between corners.
**PARAMETERS**
* *maxnum* - The maximum number of corners to return.
* *minquality* - The minimum quality metric. This should be a number between zero and one.
* *mindistance* - The minimum distance, in pixels, between successive corners.
**RETURNS**
A featureset of :py:class:`Corner` features or None if no corners are found.
**EXAMPLE**
Standard Test:
>>> img = Image("sampleimages/simplecv.png")
>>> corners = img.findCorners()
>>> if corners: True
True
Validation Test:
>>> img = Image("sampleimages/black.png")
>>> corners = img.findCorners()
>>> if not corners: True
True
**SEE ALSO**
:py:class:`Corner`
:py:meth:`findKeypoints`
"""
#initialize buffer frames
eig_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1)
temp_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1)
corner_coordinates = cv.GoodFeaturesToTrack(self._getGrayscaleBitmap(), eig_image, temp_image, maxnum, minquality, mindistance, None)
corner_features = []
for (x, y) in corner_coordinates:
corner_features.append(Corner(self, x, y))
return FeatureSet(corner_features)
def findBlobs(self, threshval = -1, minsize=10, maxsize=0, threshblocksize=0, threshconstant=5,appx_level=3):
"""
**SUMMARY**
Find blobs will look for continuous
light regions and return them as Blob features in a FeatureSet. Parameters
specify the binarize filter threshold value, and minimum and maximum size for blobs.
If a threshold value is -1, it will use an adaptive threshold. See binarize() for
more information about thresholding. The threshblocksize and threshconstant
parameters are only used for adaptive threshold.
**PARAMETERS**
* *threshval* - the threshold as an integer or an (r,g,b) tuple, where pixels below (darker than) thresh are set to the max value,
and all values above this value are set to black. If this parameter is -1 we use Otsu's method.
* *minsize* - the minimum size of the blobs, in pixels, of the returned blobs. This helps to filter out noise.
* *maxsize* - the maximum size of the blobs, in pixels, of the returned blobs.
* *threshblocksize* - the size of the block used in the adaptive binarize operation. *TODO - make this match binarize*
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
.. warning::
The *threshblocksize* parameter must be an odd number.
* *threshconstant* - The offset subtracted from the local mean when the adaptive threshold is used. *TODO - make this match binarize*
**RETURNS**
Returns a featureset (basically a list) of :py:class:`blob` features. If no blobs are found this method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> fs = img.findBlobs()
>>> if( fs is not None ):
>>> fs.draw()
**NOTES**
.. Warning::
For blobs that live right on the edge of the image OpenCV reports the position and width
height as being one over for the true position. E.g. if a blob is at (0,0) OpenCV reports
its position as (1,1). Likewise the width and height for the other corners is reported as
being one less than the width and height. This is a known bug.
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`binarize`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
:py:meth:`findBlobsFromPalette`
:py:meth:`smartFindBlobs`
"""
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(self.binarize(threshval, 255, threshblocksize, threshconstant).invert(),
self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def findSkintoneBlobs(self, minsize=10, maxsize=0,dilate_iter=1):
"""
**SUMMARY**
Find Skintone blobs will look for continuous
regions of Skintone in a color image and return them as Blob features in a FeatureSet.
Parameters specify the minimum and maximum blob size, in pixels, and the number of
dilation iterations applied to the skin tone mask before blob extraction.
**PARAMETERS**
* *minsize* - the minimum size of the blobs, in pixels, of the returned blobs. This helps to filter out noise.
* *maxsize* - the maximum size of the blobs, in pixels, of the returned blobs.
* *dilate_iter* - the number of times to run the dilation operation.
**RETURNS**
Returns a featureset (basically a list) of :py:class:`blob` features. If no blobs are found this method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> fs = img.findSkintoneBlobs()
>>> if( fs is not None ):
>>> fs.draw()
**NOTES**
It will be really awesome for making UI type stuff, where you want to track a hand or a face.
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`binarize`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
:py:meth:`findBlobsFromPalette`
:py:meth:`smartFindBlobs`
"""
if (maxsize == 0):
maxsize = self.width * self.height
mask = self.getSkintoneMask(dilate_iter)
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(mask, self, minsize = minsize, maxsize = maxsize)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def getSkintoneMask(self, dilate_iter=0):
"""
**SUMMARY**
Find Skintone mask will look for continuous
regions of Skintone in a color image and return a binary mask where the white pixels denote Skintone region.
**PARAMETERS**
* *dilate_iter* - the number of times to run the dilation operation.
**RETURNS**
Returns a binary mask.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.getSkintoneMask()
>>> mask.show()
"""
if( self._colorSpace != ColorSpace.YCrCb ):
YCrCb = self.toYCrCb()
else:
YCrCb = self
Y = np.ones((256,1),dtype=uint8)*0
Y[5:] = 255
Cr = np.ones((256,1),dtype=uint8)*0
Cr[140:180] = 255
Cb = np.ones((256,1),dtype=uint8)*0
Cb[77:135] = 255
Y_img = YCrCb.getEmpty(1)
Cr_img = YCrCb.getEmpty(1)
Cb_img = YCrCb.getEmpty(1)
cv.Split(YCrCb.getBitmap(),Y_img,Cr_img,Cb_img,None)
cv.LUT(Y_img,Y_img,cv.fromarray(Y))
cv.LUT(Cr_img,Cr_img,cv.fromarray(Cr))
cv.LUT(Cb_img,Cb_img,cv.fromarray(Cb))
temp = self.getEmpty()
cv.Merge(Y_img,Cr_img,Cb_img,None,temp)
mask=Image(temp,colorSpace = ColorSpace.YCrCb)
mask = mask.binarize((128,128,128))
mask = mask.toRGB().binarize()
mask = mask.dilate(dilate_iter)
return mask
#this code is based on code that's based on code from
#http://blog.jozilla.net/2008/06/27/fun-with-python-opencv-and-face-detection/
def findHaarFeatures(self, cascade, scale_factor=1.2, min_neighbors=2, use_canny=cv.CV_HAAR_DO_CANNY_PRUNING, min_size=(20,20), max_size=(1000,1000)):
"""
**SUMMARY**
A Haar like feature cascade is a really robust way of finding the location
of a known object. This technique works really well for a few specific applications
like face, pedestrian, and vehicle detection. It is worth noting that this
approach **IS NOT A MAGIC BULLET**. Creating a cascade file requires a large
number of images that have been sorted by a human. If you want to find Haar
Features (useful for face detection among other purposes) this will return
Haar feature objects in a FeatureSet.
For more information, consult the cv.HaarDetectObjects documentation.
To see what features are available run img.listHaarFeatures() or you can
provide your own haarcascade file if you have one available.
Note that the cascade parameter can be either a filename, or a HaarCascade
loaded with cv.Load(), or a SimpleCV HaarCascade object.
**PARAMETERS**
* *cascade* - The Haar Cascade file, this can be either the path to a cascade
file or a HaarCascased SimpleCV object that has already been
loaded.
* *scale_factor* - The scaling factor for subsequent rounds of the Haar cascade
(default 1.2) in terms of a percentage (i.e. 1.2 = 20% increase in size)
* *min_neighbors* - The minimum number of rectangles that make up an object. Usually
detected faces are clustered around the face, this is the number
of detections in a cluster that we need for detection. Higher
values here should reduce false positives and decrease false negatives.
* *use_canny* - Whether or not to use Canny pruning to reject areas with too many edges
(default yes, set to 0 to disable)
* *min_size* - Minimum window size. By default, it is set to the size
of samples the classifier has been trained on ((20,20) for face detection)
* *max_size* - Maximum window size. By default, it is set to the size
of samples the classifier has been trained on ((1000,1000) for face detection)
**RETURNS**
A feature set of HaarFeatures
**EXAMPLE**
>>> faces = HaarCascade("./SimpleCV/Features/HaarCascades/face.xml","myFaces")
>>> cam = Camera()
>>> while True:
>>> f = cam.getImage().findHaarFeatures(faces)
>>> if( f is not None ):
>>> f.show()
**NOTES**
OpenCV Docs:
- http://opencv.willowgarage.com/documentation/python/objdetect_cascade_classification.html
Wikipedia:
- http://en.wikipedia.org/wiki/Viola-Jones_object_detection_framework
- http://en.wikipedia.org/wiki/Haar-like_features
The video on this page shows how Haar features and cascades work to locate faces:
- http://dismagazine.com/dystopia/evolved-lifestyles/8115/anti-surveillance-how-to-hide-from-machines/
"""
storage = cv.CreateMemStorage(0)
#lovely. This segfaults if not present
from SimpleCV.Features.HaarCascade import HaarCascade
if isinstance(cascade, basestring):
cascade = HaarCascade(cascade)
if not cascade.getCascade():
return None
elif isinstance(cascade,HaarCascade):
pass
else:
logger.warning('Could not initialize HaarCascade. Enter Valid cascade value.')
# added all of the arguments from the opencv docs arglist
try:
import cv2
haarClassify = cv2.CascadeClassifier(cascade.getFHandle())
objects = haarClassify.detectMultiScale(self.getGrayNumpyCv2(),scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=use_canny)
cv2flag = True
except ImportError:
objects = cv.HaarDetectObjects(self._getEqualizedGrayscaleBitmap(),
cascade.getCascade(), storage, scale_factor, min_neighbors,
use_canny, min_size)
cv2flag = False
if objects is not None:
return FeatureSet([HaarFeature(self, o, cascade,cv2flag) for o in objects])
return None
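# --------------------------------------------------------------------------
# Illustrative sketch (comments only): the bare cv2 cascade call that the
# try-block above wraps. The cascade path below is hypothetical, and
# detectMultiScale returns plain (x, y, w, h) rectangles rather than SimpleCV
# HaarFeature objects.
#
#   import cv2
#
#   def detect_faces_sketch(gray_image, cascade_path="haarcascade_frontalface_default.xml"):
#       classifier = cv2.CascadeClassifier(cascade_path)
#       rects = classifier.detectMultiScale(gray_image, scaleFactor=1.2,
#                                           minNeighbors=2, minSize=(20, 20))
#       return rects
# --------------------------------------------------------------------------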
def drawCircle(self, ctr, rad, color = (0, 0, 0), thickness = 1):
"""
**SUMMARY**
Draw a circle on the image.
**PARAMETERS**
* *ctr* - The center of the circle as an (x,y) tuple.
* *rad* - The radius of the circle in pixels
* *color* - A color tuple (default black)
* *thickness* - The thickness of the circle, -1 means filled in.
**RETURNS**
.. Warning::
This is an inline operation. Nothing is returned, but a circle is drawn on the image's
drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawCircle((img.width/2,img.height/2),rad=50,color=Color.RED,thickness=3)
>>> img.show()
**NOTES**
.. Warning::
Note that this function is deprecated, try to use DrawingLayer.circle() instead.
**SEE ALSO**
:py:meth:`drawLine`
:py:meth:`drawText`
:py:meth:`dl`
:py:meth:`drawRectangle`
:py:class:`DrawingLayer`
"""
if( thickness < 0):
self.getDrawingLayer().circle((int(ctr[0]), int(ctr[1])), int(rad), color, int(thickness),filled=True)
else:
self.getDrawingLayer().circle((int(ctr[0]), int(ctr[1])), int(rad), color, int(thickness))
def drawLine(self, pt1, pt2, color = (0, 0, 0), thickness = 1):
"""
**SUMMARY**
Draw a line on the image.
**PARAMETERS**
* *pt1* - the first point for the line (tuple).
* *pt2* - the second point on the line (tuple).
* *color* - a color tuple (default black).
* *thickness* - the thickness of the line in pixels.
**RETURNS**
.. Warning::
This is an inline operation. Nothing is returned, but a line is drawn on the image's
drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawLine((0,0),(img.width,img.height),color=Color.RED,thickness=3)
>>> img.show()
**NOTES**
.. Warning::
Note that this function is deprecated, try to use DrawingLayer.line() instead.
**SEE ALSO**
:py:meth:`drawText`
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
"""
pt1 = (int(pt1[0]), int(pt1[1]))
pt2 = (int(pt2[0]), int(pt2[1]))
self.getDrawingLayer().line(pt1, pt2, color, thickness)
def size(self):
"""
**SUMMARY**
Returns a tuple that lists the width and height of the image.
**RETURNS**
The width and height as a tuple.
"""
if self.width and self.height:
return cv.GetSize(self.getBitmap())
else:
return (0, 0)
def isEmpty(self):
"""
**SUMMARY**
Checks if the image is empty by checking its width and height.
**RETURNS**
True if the image's size is (0, 0), False for any other size.
"""
return self.size() == (0, 0)
def split(self, cols, rows):
"""
**SUMMARY**
This method can be used to break an image into a series of image chunks.
Given a number of cols and rows, it splits the image into a cols x rows 2D array
of cropped images.
**PARAMETERS**
* *rows* - an integer number of rows.
* *cols* - an integer number of cols.
**RETURNS**
A list of SimpleCV images.
**EXAMPLE**
>>> img = Image("lenna")
>>> quadrant =img.split(2,2)
>>> for f in quadrant:
>>> f.show()
>>> time.sleep(1)
**NOTES**
TODO: This should return and ImageList
"""
crops = []
wratio = self.width / cols
hratio = self.height / rows
for i in range(rows):
row = []
for j in range(cols):
row.append(self.crop(j * wratio, i * hratio, wratio, hratio))
crops.append(row)
return crops
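# --------------------------------------------------------------------------
# Illustrative sketch (comments only): the same row/column tiling expressed
# directly on a numpy array, assuming the height and width divide evenly by
# rows and cols. Purely an example, not part of the class.
#
#   def split_array_sketch(arr, cols, rows):
#       h, w = arr.shape[0] // rows, arr.shape[1] // cols
#       return [[arr[i * h:(i + 1) * h, j * w:(j + 1) * w] for j in range(cols)]
#               for i in range(rows)]
# --------------------------------------------------------------------------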
def splitChannels(self, grayscale = True):
"""
**SUMMARY**
Split the channels of an image into RGB (not the default BGR).
The single parameter controls whether to return the channels as grayscale images (default)
or to return them as tinted color images.
**PARAMETERS**
* *grayscale* - If this is True we return three grayscale images, one per channel.
If it is False we return tinted color images.
**RETURNS**
A tuple of 3 image objects.
**EXAMPLE**
>>> img = Image("lenna")
>>> data = img.splitChannels()
>>> for d in data:
>>> d.show()
>>> time.sleep(1)
**SEE ALSO**
:py:meth:`mergeChannels`
"""
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
red = self.getEmpty()
green = self.getEmpty()
blue = self.getEmpty()
if (grayscale):
cv.Merge(r, r, r, None, red)
cv.Merge(g, g, g, None, green)
cv.Merge(b, b, b, None, blue)
else:
cv.Merge(None, None, r, None, red)
cv.Merge(None, g, None, None, green)
cv.Merge(b, None, None, None, blue)
return (Image(red), Image(green), Image(blue))
def mergeChannels(self,r=None,g=None,b=None):
"""
**SUMMARY**
Merge channels is the opposite of splitChannels. This method takes one image for each
of the R,G,B channels and then recombines them into a single image. Optionally any of these
channels can be None.
**PARAMETERS**
* *r* - The r or last channel of the result SimpleCV Image.
* *g* - The g or center channel of the result SimpleCV Image.
* *b* - The b or first channel of the result SimpleCV Image.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> [r,g,b] = img.splitChannels()
>>> r = r.binarize()
>>> g = g.binarize()
>>> b = b.binarize()
>>> result = img.mergeChannels(r,g,b)
>>> result.show()
**SEE ALSO**
:py:meth:`splitChannels`
"""
if( r is None and g is None and b is None ):
logger.warning("ImageClass.mergeChannels - we need at least one valid channel")
return None
if( r is None ):
r = self.getEmpty(1)
cv.Zero(r);
else:
rt = r.getEmpty(1)
cv.Split(r.getBitmap(),rt,rt,rt,None)
r = rt
if( g is None ):
g = self.getEmpty(1)
cv.Zero(g);
else:
gt = g.getEmpty(1)
cv.Split(g.getBitmap(),gt,gt,gt,None)
g = gt
if( b is None ):
b = self.getEmpty(1)
cv.Zero(b);
else:
bt = b.getEmpty(1)
cv.Split(b.getBitmap(),bt,bt,bt,None)
b = bt
retVal = self.getEmpty()
cv.Merge(b,g,r,None,retVal)
return Image(retVal)
def applyHLSCurve(self, hCurve, lCurve, sCurve):
"""
**SUMMARY**
Apply a color correction curve in HSL space. This method can be used
to change values for each channel. The curves are :py:class:`ColorCurve` class objects.
**PARAMETERS**
* *hCurve* - the hue ColorCurve object.
* *lCurve* - the lightness / value ColorCurve object.
* *sCurve* - the saturation ColorCurve object
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> hc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> lc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> sc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyHLSCurve(hc,lc,sc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyRGBCurve`
"""
#TODO CHECK ROI
#TODO CHECK CURVE SIZE
#TODO CHECK COLORSPACE
temp = cv.CreateImage(self.size(), 8, 3)
#Move to HLS space
cv.CvtColor(self._bitmap, temp, cv.CV_RGB2HLS)
#now apply the color curve correction to the HLS-converted data
tempMat = np.array(cv.GetMat(temp)).copy() #convert the HLS bitmap to a numpy matrix
#OpenCV HLS channel order is (H, L, S)
tempMat[:, :, 0] = np.take(hCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(lCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(sCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
cv.CvtColor(image, image, cv.CV_HLS2RGB)
return Image(image, colorSpace=self._colorSpace)
def applyRGBCurve(self, rCurve, gCurve, bCurve):
"""
**SUMMARY**
Apply a color correction curve in RGB space. This method can be used
to change values for each channel. The curves are :py:class:`ColorCurve` class objects.
**PARAMETERS**
* *rCurve* - the red ColorCurve object, or appropriately formatted list
* *gCurve* - the green ColorCurve object, or appropriately formatted list
* *bCurve* - the blue ColorCurve object, or appropriately formatted list
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> rc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> gc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> bc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyRGBCurve(rc,gc,bc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
if isinstance(bCurve, list):
bCurve = ColorCurve(bCurve)
if isinstance(gCurve, list):
gCurve = ColorCurve(gCurve)
if isinstance(rCurve, list):
rCurve = ColorCurve(rCurve)
tempMat = np.array(self.getMatrix()).copy()
tempMat[:, :, 0] = np.take(bCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(gCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(rCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
return Image(image, colorSpace=self._colorSpace)
def applyIntensityCurve(self, curve):
"""
**SUMMARY**
Intensity applied to all three color channels
**PARAMETERS**
* *curve* - a ColorCurve object, or 2d list that can be conditioned into one
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> rc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> gc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> bc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyRGBCurve(rc,gc,bc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
return self.applyRGBCurve(curve, curve, curve)
def colorDistance(self, color = Color.BLACK):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from a given color
tuple, scaled between 0 (the given color) and 255. Pixels distant from the
given tuple will appear brighter and pixels closest to the target color
will be darker.
By default this will give image intensity (distance from pure black)
**PARAMETERS**
* *color* - Color object or Color Tuple
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.colorDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`findBlobsFromMask`
"""
pixels = np.array(self.getNumpy()).reshape(-1, 3) #reshape our matrix to 1xN
distances = spsd.cdist(pixels, [color]) #calculate the distance each pixel is
distances *= (255.0/distances.max()) #normalize to 0 - 255
return Image(distances.reshape(self.width, self.height)) #return an Image
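# --------------------------------------------------------------------------
# Illustrative sketch (comments only): the per-pixel color distance computed
# with scipy, mirroring the cdist/normalize steps above. Purely an example,
# with a small guard against a uniform image.
#
#   import numpy as np
#   from scipy.spatial import distance as spsd
#
#   def color_distance_sketch(rgb_array, color=(0, 0, 0)):
#       pixels = rgb_array.reshape(-1, 3).astype(float)
#       d = spsd.cdist(pixels, [color]).ravel()
#       if d.max() > 0:
#           d *= 255.0 / d.max()
#       return d.reshape(rgb_array.shape[:2]).astype(np.uint8)
# --------------------------------------------------------------------------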
def hueDistance(self, color = Color.BLACK, minsaturation = 20, minvalue = 20, maxvalue=255):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from the given hue
of a specific color. The hue is "wrapped" at 180, so we have to take the shorter
of the distances between them -- this gives a hue distance of at most 90, which we
scale into a 0-255 grayscale image.
The minsaturation and minvalue are optional parameters to weed out very weak hue
signals in the picture; such pixels are pushed to the maximum distance [255].
**PARAMETERS**
* *color* - Color object or Color Tuple.
* *minsaturation* - the minimum saturation value for color (from 0 to 255).
* *minvalue* - the minimum value (brightness) for the color (from 0 to 255).
**RETURNS**
A simpleCV image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.hueDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
if isinstance(color, (float,int,long,complex)):
color_hue = color
else:
color_hue = Color.hsv(color)[0]
vsh_matrix = self.toHSV().getNumpy().reshape(-1,3) #again, gets transposed to vsh
hue_channel = np.cast['int'](vsh_matrix[:,2])
if color_hue < 90:
hue_loop = 180
else:
hue_loop = -180
#set whether we need to move back or forward on the hue circle
distances = np.minimum( np.abs(hue_channel - color_hue), np.abs(hue_channel - (color_hue + hue_loop)))
#take the minimum distance for each pixel
distances = np.where(
np.logical_and(vsh_matrix[:,0] > minvalue, vsh_matrix[:,1] > minsaturation),
distances * (255.0 / 90.0), #normalize 0 - 90 -> 0 - 255
255.0) #use the maxvalue if it false outside of our value/saturation tolerances
return Image(distances.reshape(self.width, self.height))
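# --------------------------------------------------------------------------
# Illustrative sketch (comments only): the "wrapped" hue distance in isolation.
# Hue lives on a circle of length 180 (OpenCV convention), so the distance is
# the shorter of the two arcs, then rescaled from 0-90 to 0-255. Example only.
#
#   import numpy as np
#
#   def hue_distance_sketch(hue_channel, target_hue):
#       diff = np.abs(hue_channel.astype(int) - target_hue)
#       circular = np.minimum(diff, 180 - diff)
#       return (circular * (255.0 / 90.0)).astype(np.uint8)
# --------------------------------------------------------------------------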
def erode(self, iterations=1, kernelsize=3):
"""
**SUMMARY**
Apply a morphological erosion. An erosion has the effect of removing small bits of noise
and smoothing blobs.
This implementation uses the default OpenCV 3x3 square kernel.
Erosion is effectively a local minimum detector: the kernel moves over the image and
takes the minimum value inside the kernel.
iterations - this parameter is the number of times to apply/reapply the operation
* See: http://en.wikipedia.org/wiki/Erosion_(morphology).
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-erode
* Example Use: A threshold/blob image has 'salt and pepper' noise.
* Example Code: /examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the erosion operation.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.erode(3).show()
**SEE ALSO**
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(kernelsize,kernelsize, 1, 1, cv.CV_SHAPE_RECT)
cv.Erode(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
def dilate(self, iterations=1):
"""
**SUMMARY**
Apply a morphological dilation. A dilation has the effect of smoothing blobs while
intensifying the amount of noise blobs.
This implementation uses the default OpenCV 3x3 square kernel.
Dilation is effectively a local maximum detector: the kernel moves over the image and
takes the maximum value inside the kernel.
* See: http://en.wikipedia.org/wiki/Dilation_(morphology)
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-dilate
* Example Use: A part's blob needs to be smoother
* Example Code: ./examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the dilation operation.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.dilate(3).show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
cv.Dilate(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
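# --------------------------------------------------------------------------
# Illustrative sketch (comments only): the same erode/dilate operations via the
# modern cv2 API, assuming cv2 is available. The kernel size mirrors the 3x3
# default used above.
#
#   import cv2
#   import numpy as np
#
#   def erode_dilate_sketch(binary_image, iterations=1):
#       kernel = np.ones((3, 3), np.uint8)
#       eroded = cv2.erode(binary_image, kernel, iterations=iterations)
#       dilated = cv2.dilate(binary_image, kernel, iterations=iterations)
#       return eroded, dilated
# --------------------------------------------------------------------------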
def morphOpen(self):
"""
**SUMMARY**
morphologyOpen applies a morphological open operation which is effectively
an erosion operation followed by a morphological dilation. This operation
helps to 'break apart' or 'open' binary regions which are close together.
* `Morphological opening on Wikipedia <http://en.wikipedia.org/wiki/Opening_(morphology)>`_
* `OpenCV documentation <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: two part blobs are 'sticking' together.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphOpen().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_OPEN, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_OPEN, 1)
#OPENCV 2.2 vs 2.3 compatibility
return( Image(retVal) )
def morphClose(self):
"""
**SUMMARY**
morphologyClose applies a morphological close operation which is effectively
a dilation operation followed by a morphological erosion. This operation
helps to 'bring together' or 'close' binary regions which are close together.
* See: `Closing <http://en.wikipedia.org/wiki/Closing_(morphology)>`_
* See: `Morphology from OpenCV <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: Use when a part, which should be one blob is really two blobs.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphClose().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_CLOSE, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_CLOSE, 1)
#OPENCV 2.2 vs 2.3 compatibility
return Image(retVal, colorSpace=self._colorSpace)
def morphGradient(self):
"""
**SUMMARY**
The morphological gradient is the difference between the morphological
dilation and the morphological erosion. This operation extracts the
edges of the blobs in the image.
* `See Morph Gradient of Wikipedia <http://en.wikipedia.org/wiki/Morphological_Gradient>`_
* `OpenCV documentation <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: Use when you have blobs but you really just want to know the blob edges.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphGradient().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_GRADIENT, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_GRADIENT, 1)
return Image(retVal, colorSpace=self._colorSpace )
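# --------------------------------------------------------------------------
# Illustrative sketch (comments only): open, close and gradient in one place
# via cv2.morphologyEx, assuming cv2 is available. The gradient result is the
# dilation minus the erosion, i.e. the blob outlines described above.
#
#   import cv2
#   import numpy as np
#
#   def morphology_sketch(binary_image):
#       kernel = np.ones((3, 3), np.uint8)
#       opened = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
#       closed = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
#       gradient = cv2.morphologyEx(binary_image, cv2.MORPH_GRADIENT, kernel)
#       return opened, closed, gradient
# --------------------------------------------------------------------------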
def histogram(self, numbins = 50):
"""
**SUMMARY**
Return a numpy array of the 1D histogram of intensity for pixels in the image.
The single parameter is how many "bins" to use.
**PARAMETERS**
* *numbins* - An integer number of bins in the histogram.
**RETURNS**
A list of histogram bin values.
**EXAMPLE**
>>> img = Image('lenna')
>>> hist = img.histogram()
**SEE ALSO**
:py:meth:`hueHistogram`
"""
gray = self._getGrayscaleBitmap()
(hist, bin_edges) = np.histogram(np.asarray(cv.GetMat(gray)), bins=numbins)
return hist.tolist()
def hueHistogram(self, bins = 179, dynamicRange=True):
"""
**SUMMARY**
Returns the histogram of the hue channel for the image
**PARAMETERS**
* *bins* - An integer number of bins in the histogram.
**RETURNS**
A list of histogram bin values.
**SEE ALSO**
:py:meth:`histogram`
"""
if dynamicRange:
return np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins)[0]
else:
return np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins, range=(0.0,360.0))[0]
def huePeaks(self, bins = 179):
"""
**SUMMARY**
Takes the histogram of hues, and returns the peak hue values, which
can be useful for determining what the "main colors" in a picture are.
The bins parameter can be used to lump hues together; by default it is 179
(the full hue resolution in OpenCV's HSV format).
Peak detection code taken from https://gist.github.com/1178136
Converted from/based on a MATLAB script at http://billauer.co.il/peakdet.html
Returns a list of tuples, each tuple contains the hue, and the fraction
of the image that has it.
**PARAMETERS**
* *bins* - the integer number of bins, between 0 and 179.
**RETURNS**
A list of (hue,fraction) tuples.
"""
# keyword arguments:
# y_axis -- A list containg the signal over which to find peaks
# x_axis -- A x-axis whose values correspond to the 'y_axis' list and is used
# in the return to specify the postion of the peaks. If omitted the index
# of the y_axis is used. (default: None)
# lookahead -- (optional) distance to look ahead from a peak candidate to
# determine if it is the actual peak (default: 500)
# '(sample / period) / f' where '4 >= f >= 1.25' might be a good value
# delta -- (optional) this specifies a minimum difference between a peak and
# the following points, before a peak may be considered a peak. Useful
# to hinder the algorithm from picking up false peaks towards to end of
# the signal. To work well delta should be set to 'delta >= RMSnoise * 5'.
# (default: 0)
# Delta function causes a 20% decrease in speed, when omitted
# Correctly used it can double the speed of the algorithm
# return -- Each cell of the lists contains a tupple of:
# (position, peak_value)
# to get the average peak value do 'np.mean(maxtab, 0)[1]' on the results
y_axis, x_axis = np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins)
x_axis = x_axis[0:bins]
lookahead = int(bins / 17)
delta = 0
maxtab = []
mintab = []
dump = [] #Used to pop the first hit, which is always false
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
#perform some checks
if length != len(x_axis):
raise ValueError, "Input vectors y_axis and x_axis must have same length"
if lookahead < 1:
raise ValueError, "Lookahead must be above '1' in value"
if not (np.isscalar(delta) and delta >= 0):
raise ValueError, "delta must be a positive number"
#needs to be a numpy array
y_axis = np.asarray(y_axis)
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
maxtab.append((mxpos, mx))
dump.append(True)
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
####look for min####
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
mintab.append((mnpos, mn))
dump.append(False)
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
#Remove the false hit on the first value of the y_axis
try:
if dump[0]:
maxtab.pop(0)
#print "pop max"
else:
mintab.pop(0)
#print "pop min"
del dump
except IndexError:
#no peaks were found, should the function return empty lists?
pass
huetab = []
for hue, pixelcount in maxtab:
huetab.append((hue, pixelcount / float(self.width * self.height)))
return huetab
def __getitem__(self, coord):
ret = self.getMatrix()[tuple(reversed(coord))]
if (type(ret) == cv.cvmat):
(width, height) = cv.GetSize(ret)
newmat = cv.CreateMat(height, width, ret.type)
cv.Copy(ret, newmat) #this seems to be a bug in opencv
#if you don't copy the matrix slice, when you convert to bmp you get
#a slice-sized hunk starting at 0, 0
return Image(newmat)
if self.isBGR():
return tuple(reversed(ret))
else:
return tuple(ret)
def __setitem__(self, coord, value):
value = tuple(reversed(value)) #RGB -> BGR
if(isinstance(coord[0],slice)):
cv.Set(self.getMatrix()[tuple(reversed(coord))], value)
self._clearBuffers("_matrix")
else:
self.getMatrix()[tuple(reversed(coord))] = value
self._clearBuffers("_matrix")
def __sub__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.SubS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Sub(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __add__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.AddS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Add(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __and__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.AndS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.And(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __or__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.OrS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Or(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __div__(self, other):
newbitmap = self.getEmpty()
if (not is_number(other)):
cv.Div(self.getBitmap(), other.getBitmap(), newbitmap)
else:
cv.ConvertScale(self.getBitmap(), newbitmap, 1.0/float(other))
return Image(newbitmap, colorSpace=self._colorSpace)
def __mul__(self, other):
newbitmap = self.getEmpty()
if (not is_number(other)):
cv.Mul(self.getBitmap(), other.getBitmap(), newbitmap)
else:
cv.ConvertScale(self.getBitmap(), newbitmap, float(other))
return Image(newbitmap, colorSpace=self._colorSpace)
def __pow__(self, other):
newbitmap = self.getEmpty()
cv.Pow(self.getBitmap(), newbitmap, other)
return Image(newbitmap, colorSpace=self._colorSpace)
def __neg__(self):
newbitmap = self.getEmpty()
cv.Not(self.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __invert__(self):
return self.invert()
def max(self, other):
"""
**SUMMARY**
The maximum value of my image, and the other image, in each channel.
If other is a number, returns the maximum of that and the number.
**PARAMETERS**
* *other* - Image of the same size or a number.
**RETURNS**
A SimpleCV image.
"""
newbitmap = self.getEmpty()
if is_number(other):
cv.MaxS(self.getBitmap(), other, newbitmap)
else:
if self.size() != other.size():
warnings.warn("Both images should have same sizes. Returning None.")
return None
cv.Max(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def min(self, other):
"""
**SUMMARY**
The minimum value of my image, and the other image, in each channel.
If other is a number, returns the minimum of that and the number.
**PARAMETERS**
* *other* - Image of the same size or a number.
**RETURNS**
A SimpleCV image.
"""
newbitmap = self.getEmpty()
if is_number(other):
cv.MinS(self.getBitmap(), other, newbitmap)
else:
if self.size() != other.size():
warnings.warn("Both images should have same sizes. Returning None.")
return None
cv.Min(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def _clearBuffers(self, clearexcept = "_bitmap"):
for k, v in self._initialized_buffers.items():
if k == clearexcept:
continue
self.__dict__[k] = v
def findBarcode(self,doZLib=True,zxing_path=""):
"""
**SUMMARY**
This function requires zbar and the zbar python wrapper
to be installed or zxing and the zxing python library.
**ZBAR**
To install please visit:
http://zbar.sourceforge.net/
On Ubuntu Linux 12.04 or greater:
sudo apt-get install python-zbar
**ZXING**
If you have the python-zxing library installed, you can find 2d and 1d
barcodes in your image. These are returned as Barcode feature objects
in a FeatureSet. To use it, pass the zxing_path parameter and set the
doZLib flag to False. You do not need the zxing_path parameter if you
have the ZXING_LIBRARY environment variable set.
You can clone python-zxing at:
http://github.com/oostendo/python-zxing
**INSTALLING ZEBRA CROSSING**
* Download the latest version of zebra crossing from: http://code.google.com/p/zxing/
* unpack the zip file where ever you see fit
>>> cd zxing-x.x, where x.x is the version number of zebra crossing
>>> ant -f core/build.xml
>>> ant -f javase/build.xml
This should build the library, but double check the readme
* Get our helper library
>>> git clone git://github.com/oostendo/python-zxing.git
>>> cd python-zxing
>>> python setup.py install
* Our library does not have a setup file. You will need to add
it to your path variables. On OSX/Linux use a text editor to modify your shell file (e.g. .bashrc)
export ZXING_LIBRARY=<FULL PATH OF ZXING LIBRARY - (i.e. step 2)>
for example:
export ZXING_LIBRARY=/my/install/path/zxing-x.x/
On windows you will need to add these same variables to the system variable, e.g.
http://www.computerhope.com/issues/ch000549.htm
* On OSX/Linux source your shell rc file (e.g. source .bashrc). Windows users may need to restart.
* Go grab some barcodes!
.. Warning::
Users on OSX may see the following error:
RuntimeWarning: tmpnam is a potential security risk to your program
We are working to resolve this issue. For normal use this should not be a problem.
**Returns**
A :py:class:`FeatureSet` of :py:class:`Barcode` objects. If no barcodes are detected the method returns None.
**EXAMPLE**
>>> img = cam.getImage()
>>> barcodes = img.findBarcode()
>>> for b in barcodes:
>>> b.draw()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Barcode`
"""
if( doZLib ):
try:
import zbar
except:
logger.warning('The zbar library is not installed, please install to read barcodes')
return None
#configure zbar
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
raw = self.getPIL().convert('L').tostring()
width = self.width
height = self.height
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
barcode = None
# extract results
for symbol in image:
# do something useful with results
barcode = symbol
# clean up
del(image)
else:
if not ZXING_ENABLED:
warnings.warn("Zebra Crossing (ZXing) Library not installed. Please see the release notes.")
return None
if (not self._barcodeReader):
if not zxing_path:
self._barcodeReader = zxing.BarCodeReader()
else:
self._barcodeReader = zxing.BarCodeReader(zxing_path)
tmp_filename = os.tmpnam() + ".png"
self.save(tmp_filename)
barcode = self._barcodeReader.decode(tmp_filename)
os.unlink(tmp_filename)
if barcode:
f = Barcode(self, barcode)
return FeatureSet([f])
else:
return None
#this function contains two functions -- the basic edge detection algorithm
#and then a function to break the lines down given a threshold parameter
def findLines(self, threshold=80, minlinelength=30, maxlinegap=10, cannyth1=50, cannyth2=100, useStandard=False, nLines=-1, maxpixelgap=1):
"""
**SUMMARY**
findLines will find line segments in your image and returns line feature
objects in a FeatureSet. This method uses the Hough (pronounced "HUFF") transform.
See http://en.wikipedia.org/wiki/Hough_transform
**PARAMETERS**
* *threshold* - which determines the minimum "strength" of the line.
* *minlinelength* - how many pixels long the line must be to be returned.
* *maxlinegap* - how much gap is allowed between line segments to consider them the same line.
* *cannyth1* - thresholds used in the edge detection step, refer to :py:meth:`_getEdgeMap` for details.
* *cannyth2* - thresholds used in the edge detection step, refer to :py:meth:`_getEdgeMap` for details.
* *useStandard* - use standard or probabilistic Hough transform.
* *nLines* - maximum number of lines for return.
* *maxpixelgap* - how much distance between pixels is allowed to consider them the same line.
**RETURNS**
Returns a :py:class:`FeatureSet` of :py:class:`Line` objects. If no lines are found the method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> lines = img.findLines()
>>> lines.draw()
>>> img.show()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Line`
:py:meth:`edges`
"""
em = self._getEdgeMap(cannyth1, cannyth2)
linesFS = FeatureSet()
if useStandard:
lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_STANDARD, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap)
if nLines == -1:
nLines = len(lines)
# All white points (edges) in Canny edge image
em = Image(em)
x,y = np.where(em.getGrayNumpy() > 128)
# Put points in a dictionary for fast lookup of whether a point is white
pts = dict((p, 1) for p in zip(x, y))
w, h = self.width-1, self.height-1
for rho, theta in lines[:nLines]:
ep = []
ls = []
a = math.cos(theta)
b = math.sin(theta)
# Find endpoints of line on the image's edges
if round(b, 4) == 0: # slope of the line is infinity
ep.append( (int(round(abs(rho))), 0) )
ep.append( (int(round(abs(rho))), h) )
elif round(a, 4) == 0: # slope of the line is zero
ep.append( (0, int(round(abs(rho)))) )
ep.append( (w, int(round(abs(rho)))) )
else:
# top edge
x = rho/float(a)
if 0 <= x <= w:
ep.append((int(round(x)), 0))
# bottom edge
x = (rho - h*b)/float(a)
if 0 <= x <= w:
ep.append((int(round(x)), h))
# left edge
y = rho/float(b)
if 0 <= y <= h:
ep.append((0, int(round(y))))
# right edge
y = (rho - w*a)/float(b)
if 0 <= y <= h:
ep.append((w, int(round(y))))
ep = list(set(ep)) # remove duplicates if line crosses the image at corners
ep.sort()
brl = self.bresenham_line(ep[0], ep[1])
# Follow the points on Bresenham's line. Look for white points.
# If the distance between two adjacent white points (dist) is less than or
# equal to maxpixelgap then consider them the same line. If dist is bigger
# than maxpixelgap then check whether the length of the line is bigger than
# minlinelength. If so then add the line.
dist = float('inf') # distance between two adjacent white points
len_l = float('-inf') # length of the line
for p in brl:
if p in pts:
if dist > maxpixelgap: # found the end of the previous line and the start of the new line
if len_l >= minlinelength:
if ls:
# If the gap between current line and previous
# is less than maxlinegap then merge these lines
l = ls[-1]
gap = round(math.sqrt( (start_p[0]-l[1][0])**2 + (start_p[1]-l[1][1])**2 ))
if gap <= maxlinegap:
ls.pop()
start_p = l[0]
ls.append( (start_p, last_p) )
# First white point of the new line found
dist = 1
len_l = 1
start_p = p # first endpoint of the line
else:
# dist is less than or equal maxpixelgap, so line doesn't end yet
len_l += dist
dist = 1
last_p = p # last white point
else:
dist += 1
for l in ls:
linesFS.append(Line(self, l))
linesFS = linesFS[:nLines]
else:
lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_PROBABILISTIC, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap)
if nLines == -1:
nLines = len(lines)
for l in lines[:nLines]:
linesFS.append(Line(self, l))
return linesFS
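# --------------------------------------------------------------------------
# Illustrative sketch (comments only): probabilistic Hough line detection with
# the modern cv2 API, roughly equivalent to the non-standard branch above.
# Parameter values here are just examples; cv2 is assumed to be available.
#
#   import cv2
#   import numpy as np
#
#   def find_lines_sketch(gray_image, threshold=80, min_length=30, max_gap=10):
#       edges = cv2.Canny(gray_image, 50, 100)
#       lines = cv2.HoughLinesP(edges, 1, np.pi / 180.0, threshold,
#                               minLineLength=min_length, maxLineGap=max_gap)
#       # each entry is [[x1, y1, x2, y2]]
#       return [] if lines is None else [l[0] for l in lines]
# --------------------------------------------------------------------------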
def findChessboard(self, dimensions = (8, 5), subpixel = True):
"""
**SUMMARY**
Given an image, finds a chessboard within that image. Returns the Chessboard featureset.
The Chessboard is typically used for calibration because of its evenly spaced corners.
The single parameter is the dimensions of the chessboard; a typical one can be found in \SimpleCV\tools\CalibGrid.png
**PARAMETERS**
* *dimensions* - A tuple of the size of the chessboard in width and height in grid objects.
* *subpixel* - Boolean if True use sub-pixel accuracy, otherwise use regular pixel accuracy.
**RETURNS**
A :py:class:`FeatureSet` of :py:class:`Chessboard` objects. If no chessboards are found None is returned.
**EXAMPLE**
>>> img = cam.getImage()
>>> cb = img.findChessboard()
>>> cb.draw()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Chessboard`
"""
corners = cv.FindChessboardCorners(self._getEqualizedGrayscaleBitmap(), dimensions, cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv.CV_CALIB_CB_NORMALIZE_IMAGE )
if(len(corners[1]) == dimensions[0]*dimensions[1]):
if (subpixel):
spCorners = cv.FindCornerSubPix(self.getGrayscaleMatrix(), corners[1], (11, 11), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
else:
spCorners = corners[1]
return FeatureSet([ Chessboard(self, dimensions, spCorners) ])
else:
return None
def edges(self, t1=50, t2=100):
"""
**SUMMARY**
Finds an edge map Image using the Canny edge detection method. Edges will be brighter than the surrounding area.
The t1 parameter is roughly the "strength" of the edge required, and the value between t1 and t2 is used for edge linking.
For more information:
* http://opencv.willowgarage.com/documentation/python/imgproc_feature_detection.html
* http://en.wikipedia.org/wiki/Canny_edge_detector
**PARAMETERS**
* *t1* - Int - the lower Canny threshold.
* *t2* - Int - the upper Canny threshold.
**RETURNS**
A SimpleCV image where the edges are white on a black background.
**EXAMPLE**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().edges().show()
**SEE ALSO**
:py:meth:`findLines`
"""
return Image(self._getEdgeMap(t1, t2), colorSpace=self._colorSpace)
def _getEdgeMap(self, t1=50, t2=100):
"""
Return the binary bitmap which shows where edges are in the image. The two
parameters determine how much change in the image determines an edge,
and how edges are linked together. For more information refer to:
http://en.wikipedia.org/wiki/Canny_edge_detector
http://opencv.willowgarage.com/documentation/python/imgproc_feature_detection.html?highlight=canny#Canny
"""
if (self._edgeMap and self._cannyparam[0] == t1 and self._cannyparam[1] == t2):
return self._edgeMap
self._edgeMap = self.getEmpty(1)
cv.Canny(self._getGrayscaleBitmap(), self._edgeMap, t1, t2)
self._cannyparam = (t1, t2)
return self._edgeMap
def rotate(self, angle, fixed=True, point=[-1, -1], scale = 1.0):
"""
**SUMMARY**
This function rotates an image around a specific point by the given angle.
By default, in "fixed" mode, the returned Image has the same dimensions as the original Image,
so the rotated contents may be clipped to fit. When fixed is False the output Image is
enlarged so that the rotated contents retain their original size.
By default the rotation point is the center of the image; you can also specify a scaling parameter.
.. Note:
When fixed is set to False, selecting a rotation point has no effect, since the image is moved to fit the output.
**PARAMETERS**
* *angle* - angle in degrees; positive is clockwise, negative is counter-clockwise.
* *fixed* - if fixed is True, keep the original image dimensions, otherwise enlarge the image to fit the rotation.
* *point* - the point about which we want to rotate; if none is defined we use the center.
* *scale* - an optional floating point scale parameter.
**RETURNS**
The rotated SimpleCV image.
**EXAMPLE**
>>> img = Image('logo')
>>> img2 = img.rotate( 73.00, point=(img.width/2,img.height/2))
>>> img3 = img.rotate( 73.00, fixed=False, point=(img.width/2,img.height/2))
>>> img4 = img2.sideBySide(img3)
>>> img4.show()
**SEE ALSO**
:py:meth:`rotate90`
"""
if( point[0] == -1 or point[1] == -1 ):
point[0] = (self.width-1)/2
point[1] = (self.height-1)/2
if (fixed):
retVal = self.getEmpty()
cv.Zero(retVal)
rotMat = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat)
cv.WarpAffine(self.getBitmap(), retVal, rotMat)
return Image(retVal, colorSpace=self._colorSpace)
#otherwise, we're expanding the matrix to fit the image at original size
rotMat = cv.CreateMat(2, 3, cv.CV_32FC1)
# first we create what we think the rotation matrix should be
cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat)
A = np.array([0, 0, 1])
B = np.array([self.width, 0, 1])
C = np.array([self.width, self.height, 1])
D = np.array([0, self.height, 1])
#So we have defined our image corners ABCD in homogeneous coordinates
#and apply the rotation so we can figure out the output image size
a = np.dot(rotMat, A)
b = np.dot(rotMat, B)
c = np.dot(rotMat, C)
d = np.dot(rotMat, D)
#I am not sure about this but I think the a/b/c/d are transposed
#now we calculate the extents of the rotated components.
minY = min(a[1], b[1], c[1], d[1])
minX = min(a[0], b[0], c[0], d[0])
maxY = max(a[1], b[1], c[1], d[1])
maxX = max(a[0], b[0], c[0], d[0])
#from the extents we calculate the new size
newWidth = np.ceil(maxX-minX)
newHeight = np.ceil(maxY-minY)
#now we calculate a new translation
tX = 0
tY = 0
#calculate the translation that will get us centered in the new image
if( minX < 0 ):
tX = -1.0*minX
elif(maxX > newWidth-1 ):
tX = -1.0*(maxX-newWidth)
if( minY < 0 ):
tY = -1.0*minY
elif(maxY > newHeight-1 ):
tY = -1.0*(maxY-newHeight)
#now we construct an affine map that will do the rotation and scaling we want, with
#the corners all lined up nicely with the output image.
src = ((A[0], A[1]), (B[0], B[1]), (C[0], C[1]))
dst = ((a[0]+tX, a[1]+tY), (b[0]+tX, b[1]+tY), (c[0]+tX, c[1]+tY))
cv.GetAffineTransform(src, dst, rotMat)
#calculate the translation of the corners to center the image
#use these new corner positions as the input to cvGetAffineTransform
retVal = cv.CreateImage((int(newWidth), int(newHeight)), 8, int(3))
cv.Zero(retVal)
cv.WarpAffine(self.getBitmap(), retVal, rotMat)
#cv.AddS(retVal,(0,255,0),retVal)
return Image(retVal, colorSpace=self._colorSpace)
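# --------------------------------------------------------------------------
# Illustrative sketch (comments only): computing the enlarged canvas size for
# a non-fixed rotation, i.e. the corner min/max bookkeeping done above,
# isolated as a small helper. The rotation matrix follows the
# cv.GetRotationMatrix2D convention. Example only.
#
#   import numpy as np
#
#   def rotated_bounds_sketch(width, height, angle_deg, center, scale=1.0):
#       a = np.deg2rad(angle_deg)
#       rot = np.array([[np.cos(a) * scale, np.sin(a) * scale],
#                       [-np.sin(a) * scale, np.cos(a) * scale]])
#       corners = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=float)
#       moved = (corners - center).dot(rot.T) + center
#       mins, maxs = moved.min(axis=0), moved.max(axis=0)
#       new_w, new_h = np.ceil(maxs - mins)
#       return int(new_w), int(new_h)
# --------------------------------------------------------------------------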
def transpose(self):
"""
**SUMMARY**
Does a fast 90 degree rotation to the right with a flip.
.. Warning::
Subsequent calls to this function *WILL NOT* keep rotating it to the right!!!
This function just does a matrix transpose so following one transpose by another will
just yield the original image.
**RETURNS**
The rotated SimpleCV Image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.transpose()
>>> img2.show()
**SEE ALSO**
:py:meth:`rotate`
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
return(Image(retVal, colorSpace=self._colorSpace))
def shear(self, cornerpoints):
"""
**SUMMARY**
Given a set of new corner points in clockwise order, return a sheared image
with the image contents transformed accordingly. The returned image has the same
dimensions.
**PARAMETERS**
* *cornerpoints* - a 2x4 tuple of points. The order is (top_left, top_right, bottom_left, bottom_right)
**RETURNS**
A simpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> img.shear(points).show()
**SEE ALSO**
:py:meth:`transformAffine`
:py:meth:`warp`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1))
#set the original points
aWarp = cv.CreateMat(2, 3, cv.CV_32FC1)
#create the empty warp matrix
cv.GetAffineTransform(src, cornerpoints, aWarp)
return self.transformAffine(aWarp)
def transformAffine(self, rotMatrix):
"""
**SUMMARY**
This helper function for shear performs an affine transform using the supplied matrix.
The matrix can be either an OpenCV mat or an np.ndarray type.
The matrix should be 2x3.
**PARAMETERS**
* *rotMatrix* - A 2x3 numpy array or CvMat of the affine transform.
**RETURNS**
The transformed image. Note that the transform is applied in place, i.e. the image is not enlarged to fit the transformation.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> src = ((0, 0), (img.width-1, 0), (img.width-1, img.height-1))
>>> result = cv.CreateMat(2,3,cv.CV_32FC1)
>>> cv.GetAffineTransform(src,points,result)
>>> img.transformAffine(result).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth`warp`
:py:meth:`transformPerspective`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
retVal = self.getEmpty()
if(type(rotMatrix) == np.ndarray ):
rotMatrix = npArray2cvMat(rotMatrix)
cv.WarpAffine(self.getBitmap(), retVal, rotMatrix)
return Image(retVal, colorSpace=self._colorSpace)
def warp(self, cornerpoints):
"""
**SUMMARY**
This method performs an arbitrary perspective transform.
Given a new set of corner points in clockwise order from the top left, return an Image with
the image's contents warped to the new coordinates. The returned image
will be the same size as the original image.
**PARAMETERS**
* *cornerpoints* - A list of four tuples corresponding to the destination corners in the order of (top_left,top_right,bottom_left,bottom_right)
**RETURNS**
A simpleCV Image with the warp applied. Note that this operation does not enlarge the image.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((30, 30), (img.width-10, 70), (img.width-1-40, img.height-1+30),(20,img.height+10))
>>> img.warp(points).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth:`transformAffine`
:py:meth:`transformPerspective`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
#original coordinates
src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1), (0, self.height-1))
pWarp = cv.CreateMat(3, 3, cv.CV_32FC1) #create an empty 3x3 matrix
cv.GetPerspectiveTransform(src, cornerpoints, pWarp) #figure out the warp matrix
return self.transformPerspective(pWarp)
def transformPerspective(self, rotMatrix):
"""
**SUMMARY**
This helper function for warp performs a perspective transform using the supplied matrix.
The matrix can be either an OpenCV mat or an np.ndarray type.
The matrix should be 3x3.
**PARAMETERS**
* *rotMatrix* - Numpy Array or CvMat
**RETURNS**
The transformed image. Note that the transform is applied in place, i.e. the image is not enlarged to fit the transformation.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> src = ((30, 30), (img.width-10, 70), (img.width-1-40, img.height-1+30),(20,img.height+10))
>>> result = cv.CreateMat(3,3,cv.CV_32FC1)
>>> cv.GetPerspectiveTransform(src,points,result)
>>> img.transformPerspective(result).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth:`warp`
:py:meth:`transformAffine`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
try:
import cv2
if( type(rotMatrix) != np.ndarray ):
rotMatrix = np.array(rotMatrix)
retVal = cv2.warpPerspective(src=np.array(self.getMatrix()), dsize=(self.width,self.height),M=rotMatrix,flags = cv2.INTER_CUBIC)
return Image(retVal, colorSpace=self._colorSpace, cv2image=True)
except:
retVal = self.getEmpty()
if(type(rotMatrix) == np.ndarray ):
rotMatrix = npArray2cvMat(rotMatrix)
cv.WarpPerspective(self.getBitmap(), retVal, rotMatrix)
return Image(retVal, colorSpace=self._colorSpace)
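# --------------------------------------------------------------------------
# Illustrative sketch (comments only): warping an image to four destination
# corners with cv2, mirroring the warp()/transformPerspective() pair above.
# The destination points are whatever the caller supplies; cv2 is assumed.
#
#   import cv2
#   import numpy as np
#
#   def warp_sketch(bgr_image, dst_corners):
#       h, w = bgr_image.shape[:2]
#       src = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
#       dst = np.float32(dst_corners)
#       M = cv2.getPerspectiveTransform(src, dst)
#       return cv2.warpPerspective(bgr_image, M, (w, h), flags=cv2.INTER_CUBIC)
# --------------------------------------------------------------------------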
def getPixel(self, x, y):
"""
**SUMMARY**
This function returns the RGB value for a particular image pixel given a specific row and column.
.. Warning::
this function will always return pixels in RGB format even if the image is BGR format.
**PARAMETERS**
* *x* - Int the x pixel coordinate.
* *y* - Int the y pixel coordinate.
**RETURNS**
A color value that is a three element integer tuple.
**EXAMPLE**
>>> img = Image(logo)
>>> color = img.getPixel(10,10)
.. Warning::
We suggest that this method be used sparingly. For repeated pixel access use python array notation. I.e. img[x][y].
"""
c = None
retVal = None
if( x < 0 or x >= self.width ):
logger.warning("getRGBPixel: X value is not valid.")
elif( y < 0 or y >= self.height ):
logger.warning("getRGBPixel: Y value is not valid.")
else:
c = cv.Get2D(self.getBitmap(), y, x)
if( self._colorSpace == ColorSpace.BGR ):
retVal = (c[2],c[1],c[0])
else:
retVal = (c[0],c[1],c[2])
return retVal
def getGrayPixel(self, x, y):
"""
**SUMMARY**
This function returns the gray value for a particular image pixel given a specific row and column.
.. Warning::
This function returns a single grayscale intensity value regardless of the image's color format.
**PARAMETERS**
* *x* - Int the x pixel coordinate.
* *y* - Int the y pixel coordinate.
**RETURNS**
A gray value integer between 0 and 255.
**EXAMPLE**
>>> img = Image(logo)
>>> color = img.getGrayPixel(10,10)
.. Warning::
We suggest that this method be used sparingly. For repeated pixel access use python array notation. I.e. img[x][y].
"""
retVal = None
if( x < 0 or x >= self.width ):
logger.warning("getGrayPixel: X value is not valid.")
elif( y < 0 or y >= self.height ):
logger.warning("getGrayPixel: Y value is not valid.")
else:
retVal = cv.Get2D(self._getGrayscaleBitmap(), y, x)
retVal = retVal[0]
return retVal
def getVertScanline(self, column):
"""
**SUMMARY**
This function returns a single column of RGB values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *column* - the column number working from left=0 to right=img.width.
**RETURNS**
A numpy array of the pixel values. Usually this is in BGR format.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [0,0,0]
>>> sl = img.getVertScanline(423)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( column < 0 or column >= self.width ):
logger.warning("getVertRGBScanline: column value is not valid.")
else:
retVal = cv.GetCol(self.getBitmap(), column)
retVal = np.array(retVal)
retVal = retVal[:, 0, :]
return retVal
def getHorzScanline(self, row):
"""
**SUMMARY**
This function returns a single row of RGB values from the image.
This is handy if you want to crawl the image looking for an edge.
**PARAMETERS**
* *row* - the row number working from top=0 to bottom=img.height.
**RETURNS**
A numpy array of the pixel values. Usually this is in BGR format.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [0,0,0]
>>> sl = img.getHorzScanline(422)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( row < 0 or row >= self.height ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetRow(self.getBitmap(), row)
retVal = np.array(retVal)
retVal = retVal[0, :, :]
return retVal
def getVertScanlineGray(self, column):
"""
**SUMMARY**
This function returns a single column of gray values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *column* - the column number working from left=0 to right=img.width.
**RETURNS**
A numpy array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [255]
>>> sl = img.getVertScanlineGray(421)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanline`
"""
retVal = None
if( column < 0 or column >= self.width ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetCol(self._getGrayscaleBitmap(), column )
retVal = np.array(retVal)
#retVal = retVal.transpose()
return retVal
def getHorzScanlineGray(self, row):
"""
**SUMMARY**
This function returns a single row of gray values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *row* - the row number working from top=0 to bottom=img.height.
**RETURNS**
A numpy array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [255]
>>> sl = img.getHorzScanlineGray(420)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( row < 0 or row >= self.height ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetRow(self._getGrayscaleBitmap(), row )
retVal = np.array(retVal)
retVal = retVal.transpose()
return retVal
def crop(self, x , y = None, w = None, h = None, centered=False, smart=False):
"""
**SUMMARY**
Consider you want to crop an image with the following dimensions::
(x,y)
+--------------+
| |
| |h
| |
+--------------+
w (x1,y1)
Crop attempts to use the x and y position variables and the w and h width
and height variables to crop the image. When centered is False, x and y
define the top-left corner of the cropped rectangle. When centered is True
the function uses x and y as the centroid of the cropped region.
You can also pass a feature into crop and have it automatically return
the cropped image within the bounding area of that feature.
Alternatively, the parameters can be in the form of a
- tuple or list : (x,y,w,h) or [x,y,w,h]
- two points : (x,y),(x1,y1) or [(x,y),(x1,y1)]
**PARAMETERS**
* *x* - An integer or feature.
- If it is a feature we crop to the features dimensions.
- This can be either the top left corner of the crop region or the center coordinate of the crop region.
- or in the form of tuple/list. i,e (x,y,w,h) or [x,y,w,h]
- Otherwise in two point form. i,e [(x,y),(x1,y1)] or (x,y)
* *y* - The y coordinate of the center, or top left corner of the crop region.
- Otherwise in two point form. i,e (x1,y1)
* *w* - Int - the width of the cropped region in pixels.
* *h* - Int - the height of the cropped region in pixels.
* *centered* - Boolean - if True we treat the crop region as being the center
coordinate and a width and height. If false we treat it as the top left corner of the crop region.
* *smart* - Will make sure you don't try and crop outside the image size, so if your image is 100x100 and you tried a crop like img.crop(50,50,100,100), it will autoscale the crop to the max width.
**RETURNS**
A SimpleCV Image cropped to the specified width and height.
**EXAMPLE**
>>> img = Image('lenna')
>>> img.crop(50,40,128,128).show()
>>> img.crop((50,40,128,128)).show() #roi
>>> img.crop([50,40,128,128]) #roi
>>> img.crop((50,40),(178,168)) # two point form
>>> img.crop([(50,40),(178,168)]) # two point form
>>> img.crop([x1,x2,x3,x4,x5],[y1,y1,y3,y4,y5]) # list of x's and y's
>>> img.crop([(x,y),(x,y),(x,y),(x,y),(x,y)]) # list of (x,y)
>>> img.crop(x,y,100,100, smart=True)
**SEE ALSO**
:py:meth:`embiggen`
:py:meth:`regionSelect`
"""
if smart:
if x > self.width:
x = self.width
elif x < 0:
x = 0
if y > self.height:
y = self.height
elif y < 0:
y = 0
if (x + w) > self.width:
w = self.width - x
if (y + h) > self.height:
h = self.height - y
if(isinstance(x,np.ndarray)):
x = x.tolist()
if(isinstance(y,np.ndarray)):
y = y.tolist()
#If it's a feature extract what we need
if(isinstance(x, Feature)):
theFeature = x
x = theFeature.points[0][0]
y = theFeature.points[0][1]
w = theFeature.width()
h = theFeature.height()
elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, long, float))
and y == None and w == None and h == None):
x,y,w,h = x
# x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)]
# x of the form [[x,y],[x1,y1],[x2,y2],[x3,y3]]
# x of the form ([x,y],[x1,y1],[x2,y2],[x3,y3])
# x of the form ((x,y),(x1,y1),(x2,y2),(x3,y3))
# x of the form (x,y,x1,y2) or [x,y,x1,y2]
elif( isinstance(x, (list,tuple)) and
isinstance(x[0],(list,tuple)) and
(len(x) == 4 and len(x[0]) == 2 ) and
y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2 and len(x[2])==2 and len(x[3])==2):
xmax = np.max([x[0][0],x[1][0],x[2][0],x[3][0]])
ymax = np.max([x[0][1],x[1][1],x[2][1],x[3][1]])
xmin = np.min([x[0][0],x[1][0],x[2][0],x[3][0]])
ymin = np.min([x[0][1],x[1][1],x[2][1],x[3][1]])
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form ((x,y),(x1,y1),(x2,y2),(x3,y3))")
return None
# x,y of the form [x1,x2,x3,x4,x5....] and y similar
elif(isinstance(x, (tuple,list)) and
isinstance(y, (tuple,list)) and
len(x) > 4 and len(y) > 4 ):
if(isinstance(x[0],(int, long, float)) and isinstance(y[0],(int, long, float))):
xmax = np.max(x)
ymax = np.max(y)
xmin = np.min(x)
ymin = np.min(y)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form x = [1,2,3,4,5] y =[0,2,4,6,8]")
return None
# x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]
elif(isinstance(x, (list,tuple)) and
len(x) > 4 and len(x[0]) == 2 and y == None and w == None and h == None):
if(isinstance(x[0][0],(int, long, float))):
xs = [pt[0] for pt in x]
ys = [pt[1] for pt in x]
xmax = np.max(xs)
ymax = np.max(ys)
xmin = np.min(xs)
ymin = np.min(ys)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]")
return None
# x of the form [(x,y),(x1,y1)]
elif(isinstance(x,(list,tuple)) and len(x) == 2 and isinstance(x[0],(list,tuple)) and isinstance(x[1],(list,tuple)) and y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2):
xt = np.min([x[0][0],x[1][0]])
yt = np.min([x[0][1],x[1][1]])
w = np.abs(x[0][0]-x[1][0])
h = np.abs(x[0][1]-x[1][1])
x = xt
y = yt
else:
logger.warning("x should be in the form [(x1,y1),(x2,y2)]")
return None
# x and y of the form (x,y),(x1,y1)
elif(isinstance(x, (tuple,list)) and isinstance(y,(tuple,list)) and w == None and h == None):
if (len(x)==2 and len(y)==2):
xt = np.min([x[0],y[0]])
yt = np.min([x[1],y[1]])
w = np.abs(y[0]-x[0])
h = np.abs(y[1]-x[1])
x = xt
y = yt
else:
logger.warning("if x and y are tuple it should be in the form (x1,y1) and (x2,y2)")
return None
if(y == None or w == None or h == None):
print "Please provide an x, y, width, height to function"
return None
if( w <= 0 or h <= 0 ):
logger.warning("Can't do a negative crop!")
return None
retVal = cv.CreateImage((int(w),int(h)), cv.IPL_DEPTH_8U, 3)
if( x < 0 or y < 0 ):
logger.warning("Crop will try to help you, but you have a negative crop position, your width and height may not be what you want them to be.")
if( centered ):
rectangle = (int(x-(w/2)), int(y-(h/2)), int(w), int(h))
else:
rectangle = (int(x), int(y), int(w), int(h))
(topROI, bottomROI) = self._rectOverlapROIs((rectangle[2],rectangle[3]),(self.width,self.height),(rectangle[0],rectangle[1]))
if( bottomROI is None ):
logger.warning("Hi, your crop rectangle doesn't even overlap your image. I have no choice but to return None.")
return None
retVal = np.zeros((bottomROI[3],bottomROI[2],3),dtype='uint8')
retVal= self.getNumpyCv2()[bottomROI[1]:bottomROI[1] + bottomROI[3],bottomROI[0]:bottomROI[0] + bottomROI[2],:]
img = Image(retVal, colorSpace=self._colorSpace,cv2image = True)
#Buffering the top left point (x, y) in a image.
img._uncroppedX = self._uncroppedX + int(x)
img._uncroppedY = self._uncroppedY + int(y)
return img
def regionSelect(self, x1, y1, x2, y2 ):
"""
**SUMMARY**
Region select is similar to crop, but instead of taking a position and width
and height values it simply takes two points on the image and returns the selected
region. This is very helpful for creating interactive scripts that require
the user to select a region.
**PARAMETERS**
* *x1* - Int - Point one x coordinate.
* *y1* - Int - Point one y coordinate.
* *x2* - Int - Point two x coordinate.
* *y2* - Int - Point two y coordinate.
**RETURNS**
A cropped SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> subreg = img.regionSelect(10,10,100,100) # often this comes from a mouse click
>>> subreg.show()
**SEE ALSO**
:py:meth:`crop`
"""
w = abs(x1-x2)
h = abs(y1-y2)
retVal = None
if( w <= 0 or h <= 0 or w > self.width or h > self.height ):
logger.warning("regionSelect: the given values will not fit in the image or are too small.")
else:
xf = x2
if( x1 < x2 ):
xf = x1
yf = y2
if( y1 < y2 ):
yf = y1
retVal = self.crop(xf, yf, w, h)
return retVal
def clear(self):
"""
**SUMMARY**
This is a slightly unsafe method that clears out the entire image state.
It is usually used in conjunction with drawing blobs, e.g. to fill in and draw
a single large blob in the image.
.. Warning:
Do not use this method unless you have a particularly compelling reason.
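**EXAMPLE**
A minimal sketch of the blob-drawing use case mentioned above (the blob calls are illustrative):
>>> img = Image("lenna")
>>> blobs = img.findBlobs()
>>> img.clear()                  # wipe the pixel data
>>> blobs.draw(color=Color.WHITE)
>>> img.show()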
"""
cv.SetZero(self._bitmap)
self._clearBuffers()
def draw(self, features, color=Color.GREEN, width=1, autocolor=False):
"""
**SUMMARY**
This is a method to draw Features on any given image.
**PARAMETERS**
* *features* - FeatureSet or any Feature (eg. Line, Circle, Corner, etc)
* *color* - Color of the Feature to be drawn
* *width* - width of the Feature to be drawn
* *autocolor*- If true a color is randomly selected for each feature
**RETURNS**
None
**EXAMPLE**
>>> img = Image("lenna")
>>> lines = img.equalize().findLines()
>>> img.draw(lines)
>>> img.show()
"""
if type(features) == type(self):
warnings.warn("You need to pass drawable features.")
return None
if hasattr(features, 'draw'):
from copy import deepcopy
if isinstance(features, FeatureSet):
cfeatures = deepcopy(features)
for cfeat in cfeatures:
cfeat.image = self
cfeatures.draw(color, width, autocolor)
else:
cfeatures = deepcopy(features)
cfeatures.image = self
cfeatures.draw(color, width)
else:
warnings.warn("You need to pass drawable features.")
return None
def drawText(self, text = "", x = None, y = None, color = Color.BLUE, fontsize = 16):
"""
**SUMMARY**
This function draws the string that is passed on the screen at the specified coordinates.
The Default Color is blue but you can pass it various colors
The text will default to the center of the screen if you don't pass it a value
**PARAMETERS**
* *text* - String - the text you want to write. ASCII only please.
* *x* - Int - the x position in pixels.
* *y* - Int - the y position in pixels.
* *color* - Color object or Color Tuple
* *fontsize* - Int - the font size - roughly in points.
**RETURNS**
Nothing. This is an in place function. Text is added to the Images drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawText("xamox smells like cool ranch doritos.", 50,50,color=Color.BLACK,fontsize=48)
>>> img.show()
**SEE ALSO**
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
"""
if(x == None):
x = (self.width / 2)
if(y == None):
y = (self.height / 2)
self.getDrawingLayer().setFontSize(fontsize)
self.getDrawingLayer().text(text, (x, y), color)
def drawRectangle(self,x,y,w,h,color=Color.RED,width=1,alpha=255):
"""
**SUMMARY**
Draw a rectangle on the screen given the upper left corner of the rectangle
and the width and height.
**PARAMETERS**
* *x* - the x position.
* *y* - the y position.
* *w* - the width of the rectangle.
* *h* - the height of the rectangle.
* *color* - an RGB tuple indicating the desired color.
* *width* - the line width of the rectangle's border; a value less than 1 means the rectangle is filled in completely.
* *alpha* - the alpha value on the interval from 255 to 0, 255 is opaque, 0 is completely transparent.
**RETURNS**
None - this operation is in place and adds the rectangle to the drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawRectangle( 50,50,100,123)
>>> img.show()
**SEE ALSO**
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
:py:meth:`applyLayers`
:py:class:`DrawingLayer`
"""
if( width < 1 ):
self.getDrawingLayer().rectangle((x,y),(w,h),color,filled=True,alpha=alpha)
else:
self.getDrawingLayer().rectangle((x,y),(w,h),color,width,alpha=alpha)
def drawRotatedRectangle(self,boundingbox,color=Color.RED,width=1):
"""
**SUMMARY**
Draw the minimum bounding rectangle. This rectangle is a series of four points.
**TODO**
**KAT FIX THIS**
"""
cv.EllipseBox(self.getBitmap(),box=boundingbox,color=color,thickness=width)
def show(self, type = 'window'):
"""
**SUMMARY**
This function automatically pops up a window and shows the current image.
**PARAMETERS**
* *type* - this string can have one of two values, either 'window', or 'browser'. Window opens
a display window, while browser opens the default web browser to show an image.
**RETURNS**
This method returns the display object. In the case of 'browser' this is a JpegStreamer
object. In the case of 'window' a Display object is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.show()
>>> img.show('browser')
**SEE ALSO**
:py:class:`JpegStreamer`
:py:class:`Display`
"""
if(type == 'browser'):
import webbrowser
js = JpegStreamer(8080)
self.save(js)
webbrowser.open("http://localhost:8080", 2)
return js
elif (type == 'window'):
from SimpleCV.Display import Display
if init_options_handler.on_notebook:
d = Display(displaytype='notebook')
else:
d = Display(self.size())
self.save(d)
return d
else:
print "Unknown type to show"
def _surface2Image(self,surface):
imgarray = pg.surfarray.array3d(surface)
retVal = Image(imgarray)
retVal._colorSpace = ColorSpace.RGB
return retVal.toBGR().transpose()
def _image2Surface(self,img):
return pg.image.fromstring(img.getPIL().tostring(),img.size(), "RGB")
#return pg.surfarray.make_surface(img.toRGB().getNumpy())
def toPygameSurface(self):
"""
**SUMMARY**
Converts this image to a pygame surface. This is useful if you want
to treat an image as a sprite to render onto an image. An example
would be rendering blobs on to an image.
.. Warning::
*THIS IS EXPERIMENTAL*. We are planning to remove this functionality sometime in the near future.
**RETURNS**
The image as a pygame surface.
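**EXAMPLE**
A short sketch (pygame is already a SimpleCV dependency):
>>> img = Image("lenna")
>>> surf = img.toPygameSurface()
>>> surf.get_size() == img.size()
True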
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`insertDrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
return pg.image.fromstring(self.getPIL().tostring(),self.size(), "RGB")
def addDrawingLayer(self, layer = None):
"""
**SUMMARY**
Push a new drawing layer onto the back of the layer stack
**PARAMETERS**
* *layer* - The new drawing layer to add.
**RETURNS**
The index of the new layer as an integer.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer = DrawingLayer((img.width,img.height))
>>> img.addDrawingLayer(myLayer)
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`insertDrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if layer is None:
layer = DrawingLayer(self.size())
elif not isinstance(layer, DrawingLayer):
return "Please pass a DrawingLayer object"
self._mLayers.append(layer)
return len(self._mLayers)-1
def insertDrawingLayer(self, layer, index):
"""
**SUMMARY**
Insert a new layer into the layer stack at the specified index.
**PARAMETERS**
* *layer* - A drawing layer with crap you want to draw.
* *index* - The index at which to insert the layer.
**RETURNS**
None - that's right - nothing.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw on the layers
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
self._mLayers.insert(index, layer)
return None
def removeDrawingLayer(self, index = -1):
"""
**SUMMARY**
Remove a layer from the layer stack based on the layer's index.
**PARAMETERS**
* *index* - Int - the index of the layer to remove.
**RETURNS**
This method returns the removed drawing layer.
**EXAMPLES**
>>> img = Image("Lenna")
>>> img.removeDrawingLayer(1) # removes the layer with index = 1
>>> img.removeDrawingLayer() # if no index is specified it removes the top layer
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
try:
return self._mLayers.pop(index)
except IndexError:
print 'Not a valid index or No layers to remove!'
def getDrawingLayer(self, index = -1):
"""
**SUMMARY**
Return a drawing layer based on the provided index. If not provided, it will
default to the top layer. If no layers exist, one will be created.
**PARAMETERS**
* *index* - returns the drawing layer at the specified index.
**RETURNS**
A drawing layer.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw on the layers
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> layer2 =img.getDrawingLayer(2)
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if not len(self._mLayers):
layer = DrawingLayer(self.size())
self.addDrawingLayer(layer)
try:
return self._mLayers[index]
except IndexError:
print 'Not a valid index'
def dl(self, index = -1):
"""
**SUMMARY**
Alias for :py:meth:`getDrawingLayer`
"""
return self.getDrawingLayer(index)
def clearLayers(self):
"""
**SUMMARY**
Remove all of the drawing layers.
**RETURNS**
None.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> img.clearLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
del self._mLayers[:]
return None
def layers(self):
"""
**SUMMARY**
Return the array of DrawingLayer objects associated with the image.
**RETURNS**
A list of DrawingLayer objects.
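**EXAMPLE**
A small sketch showing that the returned list is the live layer stack:
>>> img = Image("lenna")
>>> myLayer = DrawingLayer((img.width,img.height))
>>> idx = img.addDrawingLayer(myLayer)
>>> img.layers()[idx] is myLayer
True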
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
return self._mLayers
#render the image.
def _renderImage(self, layer):
imgSurf = self.getPGSurface().copy()
imgSurf.blit(layer._mSurface, (0, 0))
return Image(imgSurf)
def mergedLayers(self):
"""
**SUMMARY**
Return all DrawingLayer objects as a single DrawingLayer.
**RETURNS**
Returns a drawing layer with all of the drawing layers of this image merged into one.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> derp = img.mergedLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
final = DrawingLayer(self.size())
for layers in self._mLayers: #compose all the layers
layers.renderToOtherLayer(final)
return final
def applyLayers(self, indicies=-1):
"""
**SUMMARY**
Render all of the layers onto the current image and return the result.
Indicies can be a list of integers specifying the layers to be used.
**PARAMETERS**
* *indicies* - Indicies can be a list of integers specifying the layers to be used.
**RETURNS**
The image after applying the drawing layers.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw some stuff
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> derp = img.applyLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if not len(self._mLayers):
return self
if(indicies==-1 and len(self._mLayers) > 0 ):
final = self.mergedLayers()
imgSurf = self.getPGSurface().copy()
imgSurf.blit(final._mSurface, (0, 0))
return Image(imgSurf)
else:
final = DrawingLayer((self.width, self.height))
retVal = self
indicies.reverse()
for idx in indicies:
retVal = self._mLayers[idx].renderToOtherLayer(final)
imgSurf = self.getPGSurface().copy()
imgSurf.blit(final._mSurface, (0, 0))
indicies.reverse()
return Image(imgSurf)
def adaptiveScale(self, resolution,fit=True):
"""
**SUMMARY**
Adaptive Scale is used in the Display to automatically
adjust image size to match the display size. This method attempts to scale
an image to the desired resolution while keeping the aspect ratio the same.
If fit is False we simply crop and center the image to the resolution.
In general this method should look a lot better than arbitrary cropping and scaling.
**PARAMETERS**
* *resolution* - The size of the returned image as a (width,height) tuple.
* *fit* - If fit is true we try to fit the image while maintaining the aspect ratio.
If fit is False we crop and center the image to fit the resolution.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
This is typically used in this instance:
>>> d = Display((800,600))
>>> i = Image((640, 480))
>>> i.save(d)
Where this would scale the image to match the display size of 800x600
"""
wndwAR = float(resolution[0])/float(resolution[1])
imgAR = float(self.width)/float(self.height)
img = self
targetx = 0
targety = 0
targetw = resolution[0]
targeth = resolution[1]
if( self.size() == resolution): # size already matches - no scaling needed
retVal = self
elif( imgAR == wndwAR and fit):
retVal = img.scale(resolution[0],resolution[1])
return retVal
elif(fit):
#scale factors
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
wscale = (float(self.width)/float(resolution[0]))
hscale = (float(self.height)/float(resolution[1]))
if(wscale>1): #we're shrinking - what is the percent reduction?
wscale=1-(1.0/wscale)
else: # we need to grow the image by a percentage
wscale = 1.0-wscale
if(hscale>1):
hscale=1-(1.0/hscale)
else:
hscale=1.0-hscale
if( wscale == 0 ): #if we can get away with not scaling do that
targetx = 0
targety = (resolution[1]-self.height)/2
targetw = img.width
targeth = img.height
elif( hscale == 0 ): #if we can get away with not scaling do that
targetx = (resolution[0]-img.width)/2
targety = 0
targetw = img.width
targeth = img.height
elif(wscale < hscale): # the width has less distortion
sfactor = float(resolution[0])/float(self.width)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
if( targetw > resolution[0] or targeth > resolution[1]):
#aw shucks that still didn't work do the other way instead
sfactor = float(resolution[1])/float(self.height)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
targetx = (resolution[0]-targetw)/2
targety = 0
else:
targetx = 0
targety = (resolution[1]-targeth)/2
img = img.scale(targetw,targeth)
else: #the height has more distortion
sfactor = float(resolution[1])/float(self.height)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
if( targetw > resolution[0] or targeth > resolution[1]):
#aw shucks that still didn't work do the other way instead
sfactor = float(resolution[0])/float(self.width)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
targetx = 0
targety = (resolution[1]-targeth)/2
else:
targetx = (resolution[0]-targetw)/2
targety = 0
img = img.scale(targetw,targeth)
else: # we're going to crop instead
if(self.width <= resolution[0] and self.height <= resolution[1] ): # center a too small image
#we're too small just center the thing
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetx = (resolution[0]/2)-(self.width/2)
targety = (resolution[1]/2)-(self.height/2)
targeth = self.height
targetw = self.width
elif(self.width > resolution[0] and self.height > resolution[1]): #crop too big on both axes
targetw = resolution[0]
targeth = resolution[1]
targetx = 0
targety = 0
x = (self.width-resolution[0])/2
y = (self.height-resolution[1])/2
img = img.crop(x,y,targetw,targeth)
return img
elif( self.width <= resolution[0] and self.height > resolution[1]): #height too big
#crop along the y dimension and center along the x dimension
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetw = self.width
targeth = resolution[1]
targetx = (resolution[0]-self.width)/2
targety = 0
x = 0
y = (self.height-resolution[1])/2
img = img.crop(x,y,targetw,targeth)
elif( self.width > resolution[0] and self.height <= resolution[1]): #width too big
#crop along the y dimension and center along the x dimension
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetw = resolution[0]
targeth = self.height
targetx = 0
targety = (resolution[1]-self.height)/2
x = (self.width-resolution[0])/2
y = 0
img = img.crop(x,y,targetw,targeth)
retVal[targety:targety + targeth,targetx:targetx + targetw,:] = img.getNumpyCv2()
retVal = Image(retVal,cv2image = True)
return(retVal)
def blit(self, img, pos=None,alpha=None,mask=None,alphaMask=None):
"""
**SUMMARY**
Blit aka bit blit - which in ye olden days was an acronym for bit-block transfer. In other words blit is
when you want to smash two images together, or add one image to another. This method takes in a second
SimpleCV image and then allows you to add it at some point on the calling image. A general blit command
will just copy all of the image. You can also copy the image with an alpha value so that the source image
is semi-transparent. A binary mask can be used to blit a non-rectangular image onto the source image.
An alpha mask can be used to blend an arbitrarily transparent image onto this image. Both the mask and
alpha masks are SimpleCV Images.
**PARAMETERS**
* *img* - an image to place ontop of this image.
* *pos* - an (x,y) position tuple of the top left corner of img on this image. Note that these values
can be negative.
* *alpha* - a single floating point alpha value (0=see the bottom image, 1=see just img, 0.5 blend the two 50/50).
* *mask* - a binary mask the same size as the input image. White areas are blitted, black areas are not blitted.
* *alphaMask* - an alpha mask where each grayscale value maps how much of each image is shown.
**RETURNS**
A SimpleCV Image. The size will remain the same.
**EXAMPLE**
>>> topImg = Image("top.png")
>>> bottomImg = Image("bottom.png")
>>> mask = Image("mask.png")
>>> aMask = Image("alphaMask.png")
>>> bottomImg.blit(topImg,pos=(100,100)).show()
>>> bottomImg.blit(topImg,alpha=0.5).show()
>>> bottomImg.blit(topImg,pos=(100,100),mask=mask).show()
>>> bottomImg.blit(topImg,pos=(-10,-10),alphaMask=aMask).show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
"""
retVal = Image(self.getEmpty())
cv.Copy(self.getBitmap(),retVal.getBitmap())
w = img.width
h = img.height
if( pos is None ):
pos = (0,0)
(topROI, bottomROI) = self._rectOverlapROIs((img.width,img.height),(self.width,self.height),pos)
if( alpha is not None ):
cv.SetImageROI(img.getBitmap(),topROI);
cv.SetImageROI(retVal.getBitmap(),bottomROI);
a = float(alpha)
b = float(1.00-a)
g = float(0.00)
cv.AddWeighted(img.getBitmap(),a,retVal.getBitmap(),b,g,retVal.getBitmap())
cv.ResetImageROI(img.getBitmap());
cv.ResetImageROI(retVal.getBitmap());
elif( alphaMask is not None ):
if( alphaMask is not None and (alphaMask.width != img.width or alphaMask.height != img.height ) ):
logger.warning("Image.blit: your mask and image don't match sizes, if the mask doesn't fit, you can not blit! Try using the scale function.")
return None
cImg = img.crop(topROI[0],topROI[1],topROI[2],topROI[3])
cMask = alphaMask.crop(topROI[0],topROI[1],topROI[2],topROI[3])
retValC = retVal.crop(bottomROI[0],bottomROI[1],bottomROI[2],bottomROI[3])
r = cImg.getEmpty(1)
g = cImg.getEmpty(1)
b = cImg.getEmpty(1)
cv.Split(cImg.getBitmap(), b, g, r, None)
rf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
gf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
bf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
af=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
cv.ConvertScale(r,rf)
cv.ConvertScale(g,gf)
cv.ConvertScale(b,bf)
cv.ConvertScale(cMask._getGrayscaleBitmap(),af)
cv.ConvertScale(af,af,scale=(1.0/255.0))
cv.Mul(rf,af,rf)
cv.Mul(gf,af,gf)
cv.Mul(bf,af,bf)
dr = retValC.getEmpty(1)
dg = retValC.getEmpty(1)
db = retValC.getEmpty(1)
cv.Split(retValC.getBitmap(), db, dg, dr, None)
drf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
dgf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
dbf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
daf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
cv.ConvertScale(dr,drf)
cv.ConvertScale(dg,dgf)
cv.ConvertScale(db,dbf)
cv.ConvertScale(cMask.invert()._getGrayscaleBitmap(),daf)
cv.ConvertScale(daf,daf,scale=(1.0/255.0))
cv.Mul(drf,daf,drf)
cv.Mul(dgf,daf,dgf)
cv.Mul(dbf,daf,dbf)
cv.Add(rf,drf,rf)
cv.Add(gf,dgf,gf)
cv.Add(bf,dbf,bf)
cv.ConvertScaleAbs(rf,r)
cv.ConvertScaleAbs(gf,g)
cv.ConvertScaleAbs(bf,b)
cv.Merge(b,g,r,None,retValC.getBitmap())
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(retValC.getBitmap(),retVal.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
elif( mask is not None):
if( mask is not None and (mask.width != img.width or mask.height != img.height ) ):
logger.warning("Image.blit: your mask and image don't match sizes, if the mask doesn't fit, you can not blit! Try using the scale function. ")
return None
cv.SetImageROI(img.getBitmap(),topROI)
cv.SetImageROI(mask.getBitmap(),topROI)
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(img.getBitmap(),retVal.getBitmap(),mask.getBitmap())
cv.ResetImageROI(img.getBitmap())
cv.ResetImageROI(mask.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
else: #vanilla blit
cv.SetImageROI(img.getBitmap(),topROI)
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(img.getBitmap(),retVal.getBitmap())
cv.ResetImageROI(img.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
return retVal
def sideBySide(self, image, side="right", scale=True ):
"""
**SUMMARY**
Combine two images side by side. Great for before and after images.
**PARAMETERS**
* *side* - what side of this image to place the other image on.
choices are ('left'/'right'/'top'/'bottom').
* *scale* - if true scale the smaller of the two sides to match the
edge touching the other image. If false we center the smaller
of the two images on the edge touching the larger image.
**RETURNS**
A new image that is a combination of the two images.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = Image("orson_welles.jpg")
>>> img3 = img.sideBySide(img2)
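Other placements work the same way, for example (a sketch):
>>> img4 = img.sideBySide(img2, side="bottom", scale=False)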
**TODO**
Make this accept a list of images.
"""
#there is probably a cleaner way to do this, but I know I hit every case when they are enumerated
retVal = None
if( side == "top" ):
#clever
retVal = image.sideBySide(self,"bottom",scale)
elif( side == "bottom" ):
if( self.width > image.width ):
if( scale ):
#scale the other image width to fit
resized = image.resize(w=self.width)
nW = self.width
nH = self.height + resized.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,nW,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,self.height,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = self.width
nH = self.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,nW,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
xc = (self.width-image.width)/2
cv.SetImageROI(newCanvas,(xc,self.height,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else: #our width is smaller than the other image
if( scale ):
#scale the other image width to fit
resized = self.resize(w=image.width)
nW = image.width
nH = resized.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,resized.height,nW,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = image.width
nH = self.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
xc = (image.width - self.width)/2
cv.SetImageROI(newCanvas,(xc,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,self.height,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
elif( side == "right" ):
retVal = image.sideBySide(self,"left",scale)
else: #default to left
if( self.height > image.height ):
if( scale ):
#scale the other image height to fit
resized = image.resize(h=self.height)
nW = self.width + resized.width
nH = self.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(resized.width,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = self.width+image.width
nH = self.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
yc = (self.height-image.height)/2
cv.SetImageROI(newCanvas,(0,yc,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(image.width,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else: #our height is smaller than the other image
if( scale ):
#scale our height to fit
resized = self.resize(h=image.height)
nW = image.width + resized.width
nH = image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(image.width,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = image.width + self.width
nH = image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
yc = (image.height-self.height)/2
cv.SetImageROI(newCanvas,(image.width,yc,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
return retVal
def embiggen(self, size=None, color=Color.BLACK, pos=None):
"""
**SUMMARY**
Make the canvas larger but keep the image the same size.
**PARAMETERS**
* *size* - a (width, height) tuple for the new canvas, or a single scalar value by which to scale the image size; for instance size=2 would make the image canvas twice the size.
* *color* - the color of the canvas
* *pos* - the position of the top left corner of image on the new canvas,
if none the image is centered.
**RETURNS**
The enlarged SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img = img.embiggen((1024,1024),color=Color.BLUE)
>>> img.show()
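The scalar form and an explicit position are also accepted (a sketch):
>>> img2 = img.embiggen(2)                      # canvas twice the size, image centered
>>> img3 = img.embiggen((1024,1024), pos=(0,0)) # image pinned to the top left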
"""
if not isinstance(size, tuple) and size > 1:
size = (self.width * size, self.height * size)
if( size == None or size[0] < self.width or size[1] < self.height ):
logger.warning("image.embiggenCanvas: the size provided is invalid")
return None
newCanvas = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
newColor = cv.RGB(color[0],color[1],color[2])
cv.AddS(newCanvas,newColor,newCanvas)
topROI = None
bottomROI = None
if( pos is None ):
pos = (((size[0]-self.width)/2),((size[1]-self.height)/2))
(topROI, bottomROI) = self._rectOverlapROIs((self.width,self.height),size,pos)
if( topROI is None or bottomROI is None):
logger.warning("image.embiggenCanvas: the position of the old image doesn't make sense, there is no overlap")
return None
cv.SetImageROI(newCanvas, bottomROI)
cv.SetImageROI(self.getBitmap(),topROI)
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.ResetImageROI(self.getBitmap())
return Image(newCanvas)
def _rectOverlapROIs(self,top, bottom, pos):
"""
top is a rectangle (w,h)
bottom is a rectangle (w,h)
pos is the top left corner of the top rectangle with respect to the bottom rectangle's top left corner
method returns none if the two rectangles do not overlap. Otherwise returns the top rectangle's ROI (x,y,w,h)
and the bottom rectangle's ROI (x,y,w,h)
"""
# the positions of the top rect's corners, given the bottom rect's top left = (0,0)
tr = (pos[0]+top[0],pos[1])
tl = pos
br = (pos[0]+top[0],pos[1]+top[1])
bl = (pos[0],pos[1]+top[1])
# do an overlap test to weed out corner cases and errors
def inBounds((w,h), (x,y)):
retVal = True
if( x < 0 or y < 0 or x > w or y > h):
retVal = False
return retVal
trc = inBounds(bottom,tr)
tlc = inBounds(bottom,tl)
brc = inBounds(bottom,br)
blc = inBounds(bottom,bl)
if( not trc and not tlc and not brc and not blc ): # no overlap
return None,None
elif( trc and tlc and brc and blc ): # easy case top is fully inside bottom
tRet = (0,0,top[0],top[1])
bRet = (pos[0],pos[1],top[0],top[1])
return tRet,bRet
# let's figure out where the top rectangle sits on the bottom
# we clamp the corners of the top rectangle to live inside
# the bottom rectangle and from that get the x,y,w,h
tl = (np.clip(tl[0],0,bottom[0]),np.clip(tl[1],0,bottom[1]))
br = (np.clip(br[0],0,bottom[0]),np.clip(br[1],0,bottom[1]))
bx = tl[0]
by = tl[1]
bw = abs(tl[0]-br[0])
bh = abs(tl[1]-br[1])
# now let's figure where the bottom rectangle is in the top rectangle
# we do the same thing with different coordinates
pos = (-1*pos[0], -1*pos[1])
#recalculate the bottoms's corners with respect to the top.
tr = (pos[0]+bottom[0],pos[1])
tl = pos
br = (pos[0]+bottom[0],pos[1]+bottom[1])
bl = (pos[0],pos[1]+bottom[1])
tl = (np.clip(tl[0],0,top[0]), np.clip(tl[1],0,top[1]))
br = (np.clip(br[0],0,top[0]), np.clip(br[1],0,top[1]))
tx = tl[0]
ty = tl[1]
tw = abs(br[0]-tl[0])
th = abs(br[1]-tl[1])
return (tx,ty,tw,th),(bx,by,bw,bh)
def createBinaryMask(self,color1=(0,0,0),color2=(255,255,255)):
"""
**SUMMARY**
Generate a binary mask of the image based on a range of rgb values.
A binary mask is a black and white image where the white area is kept and the
black area is removed.
This method is used by specifying two colors as the range between the minimum and maximum
values that will be masked white.
**PARAMETERS**
* *color1* - The start of the color range for the mask.
* *color2* - The end of the color range for the mask.
**RETURNS**
A binary (black/white) image mask as a SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.createBinaryMask(color1=(0,128,128),color2=(255,255,255))
>>> mask.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
if( color1[0]-color2[0] == 0 or
color1[1]-color2[1] == 0 or
color1[2]-color2[2] == 0 ):
logger.warning("No color range selected, the result will be black, returning None instead.")
return None
if( color1[0] > 255 or color1[0] < 0 or
color1[1] > 255 or color1[1] < 0 or
color1[2] > 255 or color1[2] < 0 or
color2[0] > 255 or color2[0] < 0 or
color2[1] > 255 or color2[1] < 0 or
color2[2] > 255 or color2[2] < 0 ):
logger.warning("One of the tuple values falls outside of the range of 0 to 255")
return None
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
rl = self.getEmpty(1)
gl = self.getEmpty(1)
bl = self.getEmpty(1)
rh = self.getEmpty(1)
gh = self.getEmpty(1)
bh = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None);
#the difference == 255 case is where open CV
#kinda screws up, this should just be a white image
if( abs(color1[0]-color2[0]) == 255 ):
cv.Zero(rl)
cv.AddS(rl,255,rl)
#there is a corner case here where difference == 0
#right now we throw an error on this case.
#also we use the triplets directly as OpenCV is
# SUPER FINICKY about the type of the threshold.
elif( color1[0] < color2[0] ):
cv.Threshold(r,rl,color1[0],255,cv.CV_THRESH_BINARY)
cv.Threshold(r,rh,color2[0],255,cv.CV_THRESH_BINARY)
cv.Sub(rl,rh,rl)
else:
cv.Threshold(r,rl,color2[0],255,cv.CV_THRESH_BINARY)
cv.Threshold(r,rh,color1[0],255,cv.CV_THRESH_BINARY)
cv.Sub(rl,rh,rl)
if( abs(color1[1]-color2[1]) == 255 ):
cv.Zero(gl)
cv.AddS(gl,255,gl)
elif( color1[1] < color2[1] ):
cv.Threshold(g,gl,color1[1],255,cv.CV_THRESH_BINARY)
cv.Threshold(g,gh,color2[1],255,cv.CV_THRESH_BINARY)
cv.Sub(gl,gh,gl)
else:
cv.Threshold(g,gl,color2[1],255,cv.CV_THRESH_BINARY)
cv.Threshold(g,gh,color1[1],255,cv.CV_THRESH_BINARY)
cv.Sub(gl,gh,gl)
if( abs(color1[2]-color2[2]) == 255 ):
cv.Zero(bl)
cv.AddS(bl,255,bl)
elif( color1[2] < color2[2] ):
cv.Threshold(b,bl,color1[2],255,cv.CV_THRESH_BINARY)
cv.Threshold(b,bh,color2[2],255,cv.CV_THRESH_BINARY)
cv.Sub(bl,bh,bl)
else:
cv.Threshold(b,bl,color2[2],255,cv.CV_THRESH_BINARY)
cv.Threshold(b,bh,color1[2],255,cv.CV_THRESH_BINARY)
cv.Sub(bl,bh,bl)
cv.And(rl,gl,rl)
cv.And(rl,bl,rl)
return Image(rl)
def applyBinaryMask(self, mask,bg_color=Color.BLACK):
"""
**SUMMARY**
Apply a binary mask to the image. The white areas of the mask will be kept,
and the black areas removed. The removed areas will be set to the color of
bg_color.
**PARAMETERS**
* *mask* - the binary mask image. White areas are kept, black areas are removed.
* *bg_color* - the color of the background on the mask.
**RETURNS**
A binary (black/white) image mask as a SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.createBinaryMask(color1=(0,128,128),color2=(255,255,255))
>>> result = img.applyBinaryMask(mask)
>>> result.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`applyBinaryMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
newCanvas = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
newBG = cv.RGB(bg_color[0],bg_color[1],bg_color[2])
cv.AddS(newCanvas,newBG,newCanvas)
if( mask.width != self.width or mask.height != self.height ):
logger.warning("Image.applyBinaryMask: your mask and image don't match sizes, if the mask doesn't fit, you can't apply it! Try using the scale function. ")
return None
cv.Copy(self.getBitmap(),newCanvas,mask.getBitmap());
return Image(newCanvas,colorSpace=self._colorSpace);
def createAlphaMask(self, hue=60, hue_lb=None,hue_ub=None):
"""
**SUMMARY**
Generate a grayscale mask image based either on a single hue or on a range of hues
(hue_lb to hue_ub). The result can be used like an 8bit alpha channel, for example
with :py:meth:`blit`: pixels whose hue falls inside the selection contribute to the
mask, everything else is masked out (treated as transparent).
**PARAMETERS**
* *hue* - a hue used to generate the alpha mask.
* *hue_lb* - the lower value of a range of hue values to use.
* *hue_ub* - the upper value of a range of hue values to use.
**RETURNS**
A grayscale alpha mask as a SimpleCV Image.
>>> img = Image("lenna")
>>> mask = img.createAlphaMask(hue_lb=50,hue_ub=70)
>>> mask.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`applyBinaryMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
if( hue<0 or hue > 180 ):
logger.warning("Invalid hue color, valid hue range is 0 to 180.")
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
else:
hsv = self
h = hsv.getEmpty(1)
s = hsv.getEmpty(1)
retVal = hsv.getEmpty(1)
mask = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),h,None,s,None)
hlut = np.zeros((256,1),dtype=uint8) #thankfully we're not doing a LUT on saturation
if(hue_lb is not None and hue_ub is not None):
hlut[hue_lb:hue_ub]=255
else:
hlut[hue] = 255
cv.LUT(h,mask,cv.fromarray(hlut))
cv.Copy(s,retVal,mask) #we'll save memory using hue
return Image(retVal)
def applyPixelFunction(self, theFunc):
"""
**SUMMARY**
Apply a function to every pixel and return the resulting image.
The function must take an (r,g,b) tuple and return an (r,g,b) tuple.
**PARAMETERS**
* *theFunc* - a function of the form (r,g,b) = theFunc((r,g,b))
**RETURNS**
A simpleCV image after mapping the function to the image.
**EXAMPLE**
>>> def derp(pixels):
>>>     (r,g,b) = pixels
>>>     return (int(b*.2),int(r*.3),int(g*.5))
>>>
>>> img = Image("lenna")
>>> img2 = img.applyPixelFunction(derp)
"""
#there should be a way to do this faster using numpy vectorize
#but I can't get vectorize to work with the three channels together... have to split them
#TODO: benchmark this against vectorize
pixels = np.array(self.getNumpy()).reshape(-1,3).tolist()
result = np.array(map(theFunc,pixels),dtype=uint8).reshape(self.width,self.height,3)
return Image(result)
def integralImage(self,tilted=False):
"""
**SUMMARY**
Calculate the integral image and return it as a numpy array.
The integral image gives the sum of all of the pixels above and to the
left of a given pixel location. It is useful for computing Haar cascades.
The return type is a numpy array the same size of the image. The integral
image requires 32Bit values which are not easily supported by the SimpleCV
Image class.
**PARAMETERS**
* *tilted* - if tilted is true we tilt the image 45 degrees and then calculate the results.
**RETURNS**
A numpy array of the values.
**EXAMPLE**
>>> img = Image("logo")
>>> derp = img.integralImage()
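A sketch of using the result to sum the pixels in a box at (x,y) with size (w,h)
(the names x, y, w, h here are illustrative):
>>> ii = img.integralImage()
>>> boxsum = ii[y+h][x+w] + ii[y][x] - ii[y][x+w] - ii[y+h][x]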
**SEE ALSO**
http://en.wikipedia.org/wiki/Summed_area_table
"""
if(tilted):
img2 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
img3 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
cv.Integral(self._getGrayscaleBitmap(),img3,None,img2)
else:
img2 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
cv.Integral(self._getGrayscaleBitmap(),img2)
return np.array(cv.GetMat(img2))
def convolve(self,kernel = [[1,0,0],[0,1,0],[0,0,1]],center=None):
"""
**SUMMARY**
Convolution performs a shape change on an image. It is similar to
something like a dilate. You pass it a kernel in the form of a list, np.array, or cvMat.
**PARAMETERS**
* *kernel* - The convolution kernel. As a cvArray, cvMat, or Numpy Array.
* *center* - the anchor point of the kernel, as an (x,y) tuple; if None the center of the kernel is used.
**RETURNS**
The image after we apply the convolution.
**EXAMPLE**
>>> img = Image("sampleimages/simplecv.png")
>>> kernel = [[1,0,0],[0,1,0],[0,0,1]]
>>> conv = img.convolve()
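A simple box-blur kernel is another common choice (a sketch):
>>> blur = np.ones((3,3)) / 9.0
>>> blurred = img.convolve(blur)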
**SEE ALSO**
http://en.wikipedia.org/wiki/Convolution
"""
if(isinstance(kernel, list)):
kernel = np.array(kernel)
if(type(kernel)==np.ndarray):
sz = kernel.shape
kernel = kernel.astype(np.float32)
myKernel = cv.CreateMat(sz[0], sz[1], cv.CV_32FC1)
cv.SetData(myKernel, kernel.tostring(), kernel.dtype.itemsize * kernel.shape[1])
elif(type(kernel)==cv.mat):
myKernel = kernel
else:
logger.warning("Convolution uses numpy arrays or cv.mat type.")
return None
retVal = self.getEmpty(3)
if(center is None):
cv.Filter2D(self.getBitmap(),retVal,myKernel)
else:
cv.Filter2D(self.getBitmap(),retVal,myKernel,center)
return Image(retVal)
def findTemplate(self, template_image = None, threshold = 5, method = "SQR_DIFF_NORM", grayscale=True, rawmatches = False):
"""
**SUMMARY**
This function searches an image for a template image. The template
image is a smaller image that is searched for in the bigger image.
This is a basic pattern finder in an image. This uses the standard
OpenCV template (pattern) matching and cannot handle scaling or rotation.
Template matching returns a match score for every pixel in the image.
Often pixels that are near to each other and a close match to the template
are returned as a match. If the threshold is set too low expect to get
a huge number of values. The threshold parameter is in terms of the
number of standard deviations from the mean match value you are looking for.
For example, matches that are above three standard deviations will return
0.1% of the pixels. In an 800x600 image this means there will be
800*600*0.001 = 480 matches.
This method returns the locations of wherever it finds a match above a
threshold. Because of how template matching works, very often multiple
instances of the template overlap significantly. The best approach is to
find the centroid of all of these values. We suggest using an iterative
k-means approach to find the centroids.
**PARAMETERS**
* *template_image* - The template image.
* *threshold* - Int
* *method* -
* SQR_DIFF_NORM - Normalized square difference
* SQR_DIFF - Square difference
* CCOEFF -
* CCOEFF_NORM -
* CCORR - Cross correlation
* CCORR_NORM - Normalize cross correlation
* *grayscale* - Boolean - If false, template Match is found using BGR image.
**EXAMPLE**
>>> image = Image("/path/to/img.png")
>>> pattern_image = image.crop(100,100,100,100)
>>> found_patterns = image.findTemplate(pattern_image)
>>> found_patterns.draw()
>>> image.show()
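The raw, unclustered matches can also be requested directly (a sketch):
>>> raw_patterns = image.findTemplate(pattern_image, rawmatches=True)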
**RETURNS**
This method returns a FeatureSet of TemplateMatch objects.
"""
if(template_image == None):
logger.info( "Need image for matching")
return
if(template_image.width > self.width):
#logger.info( "Image too wide")
return
if(template_image.height > self.height):
logger.info("Image too tall")
return
check = 0; # if check = 0 we want maximal value, otherwise minimal
if(method is None or method == "" or method == "SQR_DIFF_NORM"):#minimal
method = cv.CV_TM_SQDIFF_NORMED
check = 1;
elif(method == "SQR_DIFF"): #minimal
method = cv.CV_TM_SQDIFF
check = 1
elif(method == "CCOEFF"): #maximal
method = cv.CV_TM_CCOEFF
elif(method == "CCOEFF_NORM"): #maximal
method = cv.CV_TM_CCOEFF_NORMED
elif(method == "CCORR"): #maximal
method = cv.CV_TM_CCORR
elif(method == "CCORR_NORM"): #maximal
method = cv.CV_TM_CCORR_NORMED
else:
logger.warning("ooops.. I don't know what template matching method you are looking for.")
return None
#create new image for template matching computation
matches = cv.CreateMat( (self.height - template_image.height + 1),
(self.width - template_image.width + 1),
cv.CV_32FC1)
#choose template matching method to be used
if grayscale:
cv.MatchTemplate( self._getGrayscaleBitmap(), template_image._getGrayscaleBitmap(), matches, method )
else:
cv.MatchTemplate( self.getBitmap(), template_image.getBitmap(), matches, method )
mean = np.mean(matches)
sd = np.std(matches)
if(check > 0):
compute = np.where((matches < mean-threshold*sd) )
else:
compute = np.where((matches > mean+threshold*sd) )
mapped = map(tuple, np.column_stack(compute))
fs = FeatureSet()
for location in mapped:
fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]]))
if (rawmatches):
return fs
#cluster overlapping template matches
finalfs = FeatureSet()
if( len(fs) > 0 ):
finalfs.append(fs[0])
for f in fs:
match = False
for f2 in finalfs:
if( f2._templateOverlaps(f) ): #if they overlap
f2.consume(f) #merge them
match = True
break
if( not match ):
finalfs.append(f)
for f in finalfs: #rescale the resulting clusters to fit the template size
f.rescale(template_image.width,template_image.height)
fs = finalfs
return fs
def findTemplateOnce(self, template_image = None, threshold = 0.2, method = "SQR_DIFF_NORM", grayscale=True):
"""
**SUMMARY**
This function searches an image for a single template image match. The template
image is a smaller image that is searched for in the bigger image.
This is a basic pattern finder in an image. This uses the standard
OpenCV template (pattern) matching and cannot handle scaling or rotation.
This method returns the single best match if and only if that
match's value is less than the threshold (greater than in the case of
some methods).
**PARAMETERS**
* *template_image* - The template image.
* *threshold* - Int
* *method* -
* SQR_DIFF_NORM - Normalized square difference
* SQR_DIFF - Square difference
* CCOEFF -
* CCOEFF_NORM -
* CCORR - Cross correlation
* CCORR_NORM - Normalize cross correlation
* *grayscale* - Boolean - If false, template Match is found using BGR image.
**EXAMPLE**
>>> image = Image("/path/to/img.png")
>>> pattern_image = image.crop(100,100,100,100)
>>> found_patterns = image.findTemplateOnce(pattern_image)
>>> found_patterns.draw()
>>> image.show()
**RETURNS**
This method returns a FeatureSet of TemplateMatch objects.
"""
if(template_image == None):
logger.info( "Need image for template matching.")
return
if(template_image.width > self.width):
logger.info( "Template image is too wide for the given image.")
return
if(template_image.height > self.height):
logger.info("Template image too tall for the given image.")
return
check = 0; # if check = 0 we want maximal value, otherwise minimal
if(method is None or method == "" or method == "SQR_DIFF_NORM"):#minimal
method = cv.CV_TM_SQDIFF_NORMED
check = 1;
elif(method == "SQR_DIFF"): #minimal
method = cv.CV_TM_SQDIFF
check = 1
elif(method == "CCOEFF"): #maximal
method = cv.CV_TM_CCOEFF
elif(method == "CCOEFF_NORM"): #maximal
method = cv.CV_TM_CCOEFF_NORMED
elif(method == "CCORR"): #maximal
method = cv.CV_TM_CCORR
elif(method == "CCORR_NORM"): #maximal
method = cv.CV_TM_CCORR_NORMED
else:
logger.warning("ooops.. I don't know what template matching method you are looking for.")
return None
#create new image for template matching computation
matches = cv.CreateMat( (self.height - template_image.height + 1),
(self.width - template_image.width + 1),
cv.CV_32FC1)
#choose template matching method to be used
if grayscale:
cv.MatchTemplate( self._getGrayscaleBitmap(), template_image._getGrayscaleBitmap(), matches, method )
else:
cv.MatchTemplate( self.getBitmap(), template_image.getBitmap(), matches, method )
mean = np.mean(matches)
sd = np.std(matches)
if(check > 0):
if( np.min(matches) <= threshold ):
compute = np.where( matches == np.min(matches) )
else:
return []
else:
if( np.max(matches) >= threshold ):
compute = np.where( matches == np.max(matches) )
else:
return []
mapped = map(tuple, np.column_stack(compute))
fs = FeatureSet()
for location in mapped:
fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]]))
return fs
def readText(self):
"""
**SUMMARY**
This function will return any text it can find using OCR on the
image.
Please note that it does not handle rotation well, so if you need
it in your application try to rotate and/or crop the area so that
the text would be the same way a document is read
**RETURNS**
A String
**EXAMPLE**
>>> img = Image("somethingwithtext.png")
>>> text = img.readText()
>>> print text
**NOTE**
If you're having run-time problems I feel bad for your son,
I've got 99 problems but dependencies ain't one:
http://code.google.com/p/tesseract-ocr/
http://code.google.com/p/python-tesseract/
"""
if(not OCR_ENABLED):
return "Please install the correct OCR library required - http://code.google.com/p/tesseract-ocr/ http://code.google.com/p/python-tesseract/"
api = tesseract.TessBaseAPI()
api.SetOutputName("outputName")
api.Init(".","eng",tesseract.OEM_DEFAULT)
api.SetPageSegMode(tesseract.PSM_AUTO)
jpgdata = StringIO()
self.getPIL().save(jpgdata, "jpeg")
jpgdata.seek(0)
stringbuffer = jpgdata.read()
result = tesseract.ProcessPagesBuffer(stringbuffer,len(stringbuffer),api)
return result
def findCircle(self,canny=100,thresh=350,distance=-1):
"""
**SUMMARY**
Perform the Hough Circle transform to extract _perfect_ circles from the image.
**PARAMETERS**
* *canny* - the upper bound on a Canny edge detector used to find circle edges.
* *thresh* - the threshold at which to count a circle. Small parts of a circle get
added to the accumulator array used internally by the routine. This value is the
minimum threshold. Lower thresholds give more circles, higher thresholds give fewer circles.
.. ::Warning:
If this threshold is too high, and no circles are found the underlying OpenCV
routine fails and causes a segfault.
* *distance* - the minimum distance between each successive circle in pixels. 10 is a good
starting value.
**RETURNS**
A feature set of Circle objects.
**EXAMPLE**
>>> img = Image("lenna")
>>> circs = img.findCircle()
>>> for c in circs:
>>> print c
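The detector parameters can also be tuned explicitly (a sketch):
>>> circs = img.findCircle(canny=150, thresh=200, distance=20)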
"""
storage = cv.CreateMat(self.width, 1, cv.CV_32FC3)
#a distance metric for how far apart our circles should be - this is a good benchmark
if(distance < 0 ):
distance = 1 + max(self.width,self.height)/50
cv.HoughCircles(self._getGrayscaleBitmap(),storage, cv.CV_HOUGH_GRADIENT, 2, distance,canny,thresh)
if storage.rows == 0:
return None
circs = np.asarray(storage)
sz = circs.shape
circleFS = FeatureSet()
for i in range(sz[0]):
circleFS.append(Circle(self,int(circs[i][0][0]),int(circs[i][0][1]),int(circs[i][0][2])))
return circleFS
def whiteBalance(self,method="Simple"):
"""
**SUMMARY**
Attempts to perform automatic white balancing.
Gray World see: http://scien.stanford.edu/pages/labsite/2000/psych221/projects/00/trek/GWimages.html
Robust AWB: http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/robustawb.html
http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/Papers/Robust%20Automatic%20White%20Balance%20Algorithm%20using%20Gray%20Color%20Points%20in%20Images.pdf
Simple AWB:
http://www.ipol.im/pub/algo/lmps_simplest_color_balance/
http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/simplestcb.html
**PARAMETERS**
* *method* - The method to use for white balancing. Can be one of the following:
* `Gray World <http://scien.stanford.edu/pages/labsite/2000/psych221/projects/00/trek/GWimages.html>`_
* `Robust AWB <http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/robustawb.html>`_
* `Simple AWB <http://www.ipol.im/pub/algo/lmps_simplest_color_balance/>`_
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.whiteBalance()
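The other documented methods are selected by name (a sketch):
>>> img3 = img.whiteBalance(method="GrayWorld")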
"""
img = self
if(method=="GrayWorld"):
avg = cv.Avg(img.getBitmap());
bf = float(avg[0])
gf = float(avg[1])
rf = float(avg[2])
af = (bf+gf+rf)/3.0
if( bf == 0.00 ):
b_factor = 1.00
else:
b_factor = af/bf
if( gf == 0.00 ):
g_factor = 1.00
else:
g_factor = af/gf
if( rf == 0.00 ):
r_factor = 1.00
else:
r_factor = af/rf
b = img.getEmpty(1)
g = img.getEmpty(1)
r = img.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
bfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
gfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
rfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
cv.ConvertScale(b,bfloat,b_factor)
cv.ConvertScale(g,gfloat,g_factor)
cv.ConvertScale(r,rfloat,r_factor)
(minB,maxB,minBLoc,maxBLoc) = cv.MinMaxLoc(bfloat)
(minG,maxG,minGLoc,maxGLoc) = cv.MinMaxLoc(gfloat)
(minR,maxR,minRLoc,maxRLoc) = cv.MinMaxLoc(rfloat)
scale = max([maxR,maxG,maxB])
sfactor = 1.00
if(scale > 255 ):
sfactor = 255.00/float(scale)
cv.ConvertScale(bfloat,b,sfactor);
cv.ConvertScale(gfloat,g,sfactor);
cv.ConvertScale(rfloat,r,sfactor);
retVal = img.getEmpty()
cv.Merge(b,g,r,None,retVal);
retVal = Image(retVal)
elif( method == "Simple" ):
thresh = 0.003
sz = img.width*img.height
tempMat = img.getNumpy()
bcf = sss.cumfreq(tempMat[:,:,0], numbins=256)
bcf = bcf[0] # get our cumulative histogram of values for this color
blb = -1 #our lower bound
bub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find where the lower and upper thresh% of our values live
while( lower_thresh < thresh ):
blb = blb+1
lower_thresh = bcf[blb]/sz
while( upper_thresh < thresh ):
bub = bub-1
upper_thresh = (sz-bcf[bub])/sz
gcf = sss.cumfreq(tempMat[:,:,1], numbins=256)
gcf = gcf[0]
glb = -1 #our lower bound
gub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find where the lower and upper thresh% of our values live
while( lower_thresh < thresh ):
glb = glb+1
lower_thresh = gcf[glb]/sz
while( upper_thresh < thresh ):
gub = gub-1
upper_thresh = (sz-gcf[gub])/sz
rcf = sss.cumfreq(tempMat[:,:,2], numbins=256)
rcf = rcf[0]
rlb = -1 #our lower bound
rub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find where the lower and upper thresh% of our values live
while( lower_thresh < thresh ):
rlb = rlb+1
lower_thresh = rcf[rlb]/sz
while( upper_thresh < thresh ):
rub = rub-1
upper_thresh = (sz-rcf[rub])/sz
#now we create the scale factors for the remaining pixels
rlbf = float(rlb)
rubf = float(rub)
glbf = float(glb)
gubf = float(gub)
blbf = float(blb)
bubf = float(bub)
rLUT = np.ones((256,1),dtype=uint8)
gLUT = np.ones((256,1),dtype=uint8)
bLUT = np.ones((256,1),dtype=uint8)
for i in range(256):
if(i <= rlb):
rLUT[i][0] = 0
elif( i >= rub):
rLUT[i][0] = 255
else:
rf = ((float(i)-rlbf)*255.00/(rubf-rlbf))
rLUT[i][0] = int(rf)
if( i <= glb):
gLUT[i][0] = 0
elif( i >= gub):
gLUT[i][0] = 255
else:
gf = ((float(i)-glbf)*255.00/(gubf-glbf))
gLUT[i][0] = int(gf)
if( i <= blb):
bLUT[i][0] = 0
elif( i >= bub):
bLUT[i][0] = 255
else:
bf = ((float(i)-blbf)*255.00/(bubf-blbf))
bLUT[i][0] = int(bf)
retVal = img.applyLUT(bLUT,rLUT,gLUT)
return retVal
def applyLUT(self,rLUT=None,bLUT=None,gLUT=None):
"""
**SUMMARY**
Apply LUT allows you to apply a LUT (look up table) to the pixels in an image. Each LUT is just
a 256-entry array that maps an input channel value (the array index) to an output value in the
result image. For example rLUT[0]=255 would change all pixels where the red channel is zero to the value 255.
**PARAMETERS**
* *rLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
* *gLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
* *bLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
.. warning::
The dtype is very important. Will throw the following error without it:
error: dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels())
**RETURNS**
The SimpleCV image remapped using the LUT.
**EXAMPLE**
This example saturates the red channel:
>>> rlut = np.ones((256,1),dtype=uint8)*255
>>> img=img.applyLUT(rLUT=rlut)
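A LUT can also encode an arbitrary tone curve. The following sketch builds a hypothetical gamma-0.5 curve for the green channel (the gamma value is only illustrative; note the uint8 dtype):
>>> glut = np.array([int(255*((i/255.0)**0.5)) for i in range(256)], dtype=uint8).reshape(256,1) # illustrative gamma 0.5 curve
>>> img3 = img.applyLUT(gLUT=glut)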
NOTE:
-==== BUG NOTE ====-
This method seems to error on the LUT map for some versions of OpenCV.
I am trying to figure out why. -KAS
"""
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None);
if(rLUT is not None):
cv.LUT(r,r,cv.fromarray(rLUT))
if(gLUT is not None):
cv.LUT(g,g,cv.fromarray(gLUT))
if(bLUT is not None):
cv.LUT(b,b,cv.fromarray(bLUT))
temp = self.getEmpty()
cv.Merge(b,g,r,None,temp)
return Image(temp)
def _getRawKeypoints(self,thresh=500.00,flavor="SURF", highQuality=1, forceReset=False):
"""
.. _getRawKeypoints:
This method finds keypoints in an image and returns them as the raw keypoints
and keypoint descriptors. When this method is called it caches the features
and keypoints locally for quick and easy access.
Parameters:
thresh - The minimum quality metric for SURF descriptors. Good values
range between about 300.00 and 600.00
flavor - a string indicating the method to use to extract features.
A good primer on how feature/keypoint extractors work can be found here:
http://en.wikipedia.org/wiki/Feature_detection_(computer_vision)
http://www.cg.tu-berlin.de/fileadmin/fg144/Courses/07WS/compPhoto/Feature_Detection.pdf
"SURF" - extract the SURF features and descriptors. If you don't know
what to use, use this.
See: http://en.wikipedia.org/wiki/SURF
"STAR" - The STAR feature extraction algorithm
See: http://pr.willowgarage.com/wiki/Star_Detector
"FAST" - The FAST keypoint extraction algorithm
See: http://en.wikipedia.org/wiki/Corner_detection#AST_based_feature_detectors
All the flavors listed below are for OpenCV versions >= 2.4.0:
"MSER" - Maximally Stable Extremal Regions algorithm
See: http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions
"Dense" - Dense Scale Invariant Feature Transform.
See: http://www.vlfeat.org/api/dsift.html
"ORB" - The Oriented FAST and Rotated BRIEF
See: http://www.willowgarage.com/sites/default/files/orb_final.pdf
"SIFT" - Scale-invariant feature transform
See: http://en.wikipedia.org/wiki/Scale-invariant_feature_transform
"BRISK" - Binary Robust Invariant Scalable Keypoints
See: http://www.asl.ethz.ch/people/lestefan/personal/BRISK
"FREAK" - Fast Retina Keypoints
See: http://www.ivpe.com/freak.htm
Note: It's a keypoint descriptor and not a KeyPoint detector. SIFT KeyPoints
are detected and FREAK is used to extract the keypoint descriptors.
highQuality - The SURF descriptor comes in two forms, a vector of 64 descriptor
values and a vector of 128 descriptor values. The latter are "high"
quality descriptors.
forceReset - If keypoints have already been calculated for this image those
keypoints are returned instead of being recalculated. If
forceReset is True we always recalculate the values, otherwise
we use the cached copies.
Returns:
A tuple of keypoint objects and optionally a numpy array of the descriptors.
Example:
>>> img = Image("aerospace.jpg")
>>> kp,d = img._getRawKeypoints()
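A different flavor can be requested the same way (a sketch; ORB requires OpenCV >= 2.4.0 as noted above):
>>> kp,d = img._getRawKeypoints(flavor="ORB")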
Notes:
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
See Also:
ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
ImageClass._getFLANNMatches(self,sd,td)
ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
"""
try:
import cv2
ver = cv2.__version__
new_version = 0
#For OpenCV versions up to 2.4.0, cv2.__version__ is of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:'):
if int(ver.replace('.','0'))>=20400:
new_version = 1
except:
warnings.warn("Can't run Keypoints without OpenCV >= 2.3.0")
return (None, None)
if( forceReset ):
self._mKeyPoints = None
self._mKPDescriptors = None
_detectors = ["SIFT", "SURF", "FAST", "STAR", "FREAK", "ORB", "BRISK", "MSER", "Dense"]
_descriptors = ["SIFT", "SURF", "ORB", "FREAK", "BRISK"]
if flavor not in _detectors:
warnings.warn("Invalid choice of keypoint detector.")
return (None, None)
if self._mKeyPoints != None and self._mKPFlavor == flavor:
return (self._mKeyPoints, self._mKPDescriptors)
if hasattr(cv2, flavor):
if flavor == "SURF":
# cv2.SURF(hessianThreshold, nOctaves, nOctaveLayers, extended, upright)
detector = cv2.SURF(thresh, 4, 2, highQuality, 1)
if new_version == 0:
self._mKeyPoints, self._mKPDescriptors = detector.detect(self.getGrayNumpy(), None, False)
else:
self._mKeyPoints, self._mKPDescriptors = detector.detectAndCompute(self.getGrayNumpy(), None, False)
if len(self._mKeyPoints) == 0:
return (None, None)
if highQuality == 1:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1, 128))
else:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1, 64))
elif flavor in _descriptors:
detector = getattr(cv2, flavor)()
self._mKeyPoints, self._mKPDescriptors = detector.detectAndCompute(self.getGrayNumpy(), None, False)
elif flavor == "MSER":
if hasattr(cv2, "FeatureDetector_create"):
detector = cv2.FeatureDetector_create("MSER")
self._mKeyPoints = detector.detect(self.getGrayNumpy())
elif flavor == "STAR":
detector = cv2.StarDetector()
self._mKeyPoints = detector.detect(self.getGrayNumpy())
elif flavor == "FAST":
if not hasattr(cv2, "FastFeatureDetector"):
warnings.warn("You need OpenCV >= 2.4.0 to support FAST")
return None, None
detector = cv2.FastFeatureDetector(int(thresh), True)
self._mKeyPoints = detector.detect(self.getGrayNumpy(), None)
elif hasattr(cv2, "FeatureDetector_create"):
if flavor in _descriptors:
extractor = cv2.DescriptorExtractor_create(flavor)
if flavor == "FREAK":
if new_version == 0:
warnings.warn("You need OpenCV >= 2.4.3 to support FAST")
flavor = "SIFT"
detector = cv2.FeatureDetector_create(flavor)
self._mKeyPoints = detector.detect(self.getGrayNumpy())
self._mKeyPoints, self._mKPDescriptors = extractor.compute(self.getGrayNumpy(), self._mKeyPoints)
else:
detector = cv2.FeatureDetector_create(flavor)
self._mKeyPoints = detector.detect(self.getGrayNumpy())
else:
warnings.warn("SimpleCV can't seem to find appropriate function with your OpenCV version.")
return (None, None)
return (self._mKeyPoints, self._mKPDescriptors)
def _getFLANNMatches(self,sd,td):
"""
Summary:
This method does a Fast Library for Approximate Nearest Neighbors (FLANN) calculation between two sets
of feature vectors. The result is two numpy arrays: the first one is a list of indexes of the
matches and the second one is the match distance value. For the match index array idx, the index
positions correspond to the entries of td, and the value stored in the array is the index into sd.
I.e. j = idx[i] is where td[i] matches sd[j].
The second numpy array, at index i, is the match distance between td[i] and sd[j].
Lower distances mean better matches.
Parameters:
sd - A numpy array of feature vectors of any size.
td - A numpy array of feature vectors of any size, this vector is used for indexing
and the result arrays will have a length matching this vector.
Returns:
Two numpy arrays, the first one, idx, is the idx of the matches of the vector td with sd.
The second one, dist, is the distance value for the closest match.
Example:
>>> kpt,td = img1._getRawKeypoints() # t is template
>>> kps,sd = img2._getRawKeypoints() # s is source
>>> idx,dist = img1._getFLANNMatches(sd,td)
>>> j = idx[42]
>>> print kps[j] # the source keypoint that matches template keypoint 42
>>> print dist[42] # the match quality.
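A common follow-up is to keep only the matches below a distance cutoff (a sketch; the 0.2 cutoff is only illustrative):
>>> good = dist[:,0] < 0.2 # illustrative distance cutoff
>>> pairs = [(i, int(idx[i])) for i in range(len(idx)) if good[i]] # (td index, sd index) pairs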
Notes:
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
See:
ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
ImageClass._getFLANNMatches(self,sd,td)
ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
ImageClass.findKeypoints(self,min_quality=300.00,flavor="SURF",highQuality=False )
ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
"""
try:
import cv2
except:
logger.warning("Can't run FLANN Matches without OpenCV >= 2.3.0")
return
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)
flann = cv2.flann_Index(sd, flann_params)
idx, dist = flann.knnSearch(td, 1, params = {}) # bug: need to provide empty dict
del flann
return idx,dist
def drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1):
"""
**SUMMARY**
Draw keypoints draws a side by side representation of two images, calculates
keypoints for both images, determines the keypoint correspondences, and then draws
the correspondences. This method is helpful for debugging keypoint calculations
and also looks really cool :) . The parameters mirror the parameters used
for findKeypointMatch to assist with debugging.
**PARAMETERS**
* *template* - A template image.
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
* *width* - The width of the drawn line.
**RETURNS**
A side by side image of the template and source image with each feature correspondence
draw in a different color.
**EXAMPLE**
>>> img = cam.getImage()
>>> template = Image("myTemplate.png")
>>> result = img.drawKeypointMatches(template,300.00,0.4)
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
if template == None:
return None
resultImg = template.sideBySide(self,scale=False)
hdif = (self.height-template.height)/2
skp,sd = self._getRawKeypoints(thresh)
tkp,td = template._getRawKeypoints(thresh)
if( td == None or sd == None ):
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry." )
return resultImg
template_points = float(td.shape[0])
sample_points = float(sd.shape[0])
magic_ratio = 1.00
if( sample_points > template_points ):
magic_ratio = float(sd.shape[0])/float(td.shape[0])
idx,dist = self._getFLANNMatches(sd,td) # match our keypoint descriptors
p = dist[:,0]
result = p*magic_ratio < minDist #, = np.where( p*magic_ratio < minDist )
for i in range(0,len(idx)):
if( result[i] ):
pt_a = (tkp[i].pt[1], tkp[i].pt[0]+hdif)
pt_b = (skp[idx[i]].pt[1]+template.width,skp[idx[i]].pt[0])
resultImg.drawLine(pt_a,pt_b,color=Color.getRandom(),thickness=width)
return resultImg
def findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4):
"""
**SUMMARY**
findKeypointMatch allows you to match a template image with another image using
SURF keypoints. The method extracts keypoints from each image, uses the Fast Local
Approximate Nearest Neighbors algorithm to find correspondences between the feature
points, filters the correspondences based on quality, and then, attempts to calculate
a homography between the two images. This homography allows us to draw a matching
bounding box in the source image that corresponds to the template. This method allows
you to perform matches that would ordinarily fail when using the findTemplate method.
This method should be able to handle reasonable changes in camera orientation and
illumination. Using a template that is close to the target image will yield much
better results.
.. Warning::
This method is only capable of finding one instance of the template in an image.
If more than one instance is visible the homography calculation and the method will
fail.
**PARAMETERS**
* *template* - A template image.
* *quality* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
* *minMatch* - The percentage of features which must have matches to proceed with homography calculation.
A value of 0.4 means 40% of features must match. Higher values mean better matches
are used. Good values are between about 0.3 and 0.7
**RETURNS**
If a homography (match) is found this method returns a feature set with a single
KeypointMatch feature. If no match is found None is returned.
**EXAMPLE**
>>> template = Image("template.png")
>>> img = camera.getImage()
>>> fs = img.findKeypointMatch(template)
>>> if( fs is not None ):
>>> fs.draw()
>>> img.show()
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
| self._mKeyPoints # A Tuple of keypoint objects
| self._mKPDescriptors # The descriptor as a floating point numpy array
| self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
| `See Documentation <http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint>`_
**SEE ALSO**
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except:
warnings.warn("Can't Match Keypoints without OpenCV >= 2.3.0")
return
if template == None:
return None
fs = FeatureSet()
skp,sd = self._getRawKeypoints(quality)
tkp,td = template._getRawKeypoints(quality)
if( skp == None or tkp == None ):
warnings.warn("I didn't get any keypoints. Image might be too uniform or blurry." )
return None
template_points = float(td.shape[0])
sample_points = float(sd.shape[0])
magic_ratio = 1.00
if( sample_points > template_points ):
magic_ratio = float(sd.shape[0])/float(td.shape[0])
idx,dist = self._getFLANNMatches(sd,td) # match our keypoint descriptors
p = dist[:,0]
result = p*magic_ratio < minDist #, = np.where( p*magic_ratio < minDist )
pr = np.sum(result)/float(dist.shape[0]) # fraction of keypoints that actually matched
if( pr > minMatch and len(result)>4 ): # if more than minMatch % matches we go ahead and get the data
lhs = []
rhs = []
for i in range(0,len(idx)):
if( result[i] ):
lhs.append((tkp[i].pt[1], tkp[i].pt[0]))
rhs.append((skp[idx[i]].pt[0], skp[idx[i]].pt[1]))
rhs_pt = np.array(rhs)
lhs_pt = np.array(lhs)
if( len(rhs_pt) < 16 or len(lhs_pt) < 16 ):
return None
homography = []
(homography,mask) = cv2.findHomography(lhs_pt,rhs_pt,cv2.RANSAC, ransacReprojThreshold=1.0 )
w = template.width
h = template.height
pts = np.array([[0,0],[0,h],[w,h],[w,0]], dtype="float32")
pPts = cv2.perspectiveTransform(np.array([pts]), homography)
pt0i = (pPts[0][0][1], pPts[0][0][0])
pt1i = (pPts[0][1][1], pPts[0][1][0])
pt2i = (pPts[0][2][1], pPts[0][2][0])
pt3i = (pPts[0][3][1], pPts[0][3][0])
#construct the feature set and return it.
fs = FeatureSet()
fs.append(KeypointMatch(self,template,(pt0i,pt1i,pt2i,pt3i),homography))
#the homography matrix is necessary for many purposes like image stitching.
#fs.append(homography) # No need to add homography as it is already being
#added in KeyPointMatch class.
return fs
else:
return None
def findKeypoints(self,min_quality=300.00,flavor="SURF",highQuality=False ):
"""
**SUMMARY**
This method finds keypoints in an image and returns them as a feature set.
Keypoints are unique regions in an image that demonstrate some degree of
invariance to changes in camera pose and illumination. They are helpful
for calculating homographies between camera views, object rotations, and
multiple view overlaps.
We support several keypoint detectors and keypoint descriptors. Only the descriptor
flavors (SURF, SIFT, ORB, BRISK, FREAK) return both features and descriptors at this time.
**PARAMETERS**
* *min_quality* - The minimum quality metric for SURF descriptors. Good values
range between about 300.00 and 600.00
* *flavor* - a string indicating the method to use to extract features.
A good primer on how feature/keypoint extractors work can be found in
`feature detection on wikipedia <http://en.wikipedia.org/wiki/Feature_detection_(computer_vision)>`_
and
`this tutorial. <http://www.cg.tu-berlin.de/fileadmin/fg144/Courses/07WS/compPhoto/Feature_Detection.pdf>`_
* "SURF" - extract the SURF features and descriptors. If you don't know
what to use, use this.
See: http://en.wikipedia.org/wiki/SURF
* "STAR" - The STAR feature extraction algorithm
See: http://pr.willowgarage.com/wiki/Star_Detector
* "FAST" - The FAST keypoint extraction algorithm
See: http://en.wikipedia.org/wiki/Corner_detection#AST_based_feature_detectors
All the flavors listed below are for OpenCV versions >= 2.4.0:
* "MSER" - Maximally Stable Extremal Regions algorithm
See: http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions
* "Dense" -
* "ORB" - The Oriented FAST and Rotated BRIEF
See: http://www.willowgarage.com/sites/default/files/orb_final.pdf
* "SIFT" - Scale-invariant feature transform
See: http://en.wikipedia.org/wiki/Scale-invariant_feature_transform
* "BRISK" - Binary Robust Invariant Scalable Keypoints
See: http://www.asl.ethz.ch/people/lestefan/personal/BRISK
* "FREAK" - Fast Retina Keypoints
See: http://www.ivpe.com/freak.htm
Note: It's a keypoint descriptor and not a KeyPoint detector. SIFT KeyPoints
are detected and FREAK is used to extract the keypoint descriptors.
* *highQuality* - The SURF descriptor comes in two forms, a vector of 64 descriptor
values and a vector of 128 descriptor values. The latter are "high"
quality descriptors.
**RETURNS**
A feature set of KeypointFeatures. These KeypointFeatures let you draw each
feature, crop the features, get the feature descriptors, etc.
**EXAMPLE**
>>> img = Image("aerospace.jpg")
>>> fs = img.findKeypoints(flavor="SURF",min_quality=500,highQuality=True)
>>> fs = fs.sortArea()
>>> fs[-1].draw()
>>> img.draw()
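A detector-only flavor can be selected the same way (a sketch; FAST thresholds are typically much smaller than SURF quality values, so 20 here is only illustrative):
>>> fs = img.findKeypoints(flavor="FAST", min_quality=20) # illustrative FAST threshold
>>> fs.draw()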
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named self._mKeyPoints, self._mKPDescriptors, and self._mKPFlavor.
**SEE ALSO**
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except:
logger.warning("Can't use Keypoints without OpenCV >= 2.3.0")
return None
fs = FeatureSet()
kp = []
d = []
if highQuality:
kp,d = self._getRawKeypoints(thresh=min_quality,forceReset=True,flavor=flavor,highQuality=1)
else:
kp,d = self._getRawKeypoints(thresh=min_quality,forceReset=True,flavor=flavor,highQuality=0)
if( flavor in ["ORB", "SIFT", "SURF", "BRISK", "FREAK"] and kp!=None and d !=None ):
for i in range(0,len(kp)):
fs.append(KeyPoint(self,kp[i],d[i],flavor))
elif(flavor in ["FAST", "STAR", "MSER", "Dense"] and kp!=None ):
for i in range(0,len(kp)):
fs.append(KeyPoint(self,kp[i],None,flavor))
else:
logger.warning("ImageClass.Keypoints: I don't know the method you want to use")
return None
return fs
def findMotion(self, previous_frame, window=11, method='BM', aggregate=True):
"""
**SUMMARY**
findMotion performs an optical flow calculation. This method attempts to find
motion between two consecutive frames of video. You provide it
with the previous frame image and it returns a feature set of motion
features that are vectors in the direction of motion.
**PARAMETERS**
* *previous_frame* - The last frame as an Image.
* *window* - The block size for the algorithm. For the HS and LK methods
this is the regular sample grid at which we return motion samples.
For the block matching method this is the matching window size.
* *method* - The algorithm to use as a string.
Your choices are:
* 'BM' - default block matching robust but slow - if you are unsure use this.
* 'LK' - `Lucas-Kanade method <http://en.wikipedia.org/wiki/Lucas%E2%80%93Kanade_method>`_
* 'HS' - `Horn-Schunck method <http://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method>`_
* *aggregate* - If aggregate is true, each of our motion features is the average of
motion around the sample grid defined by window. If aggregate is false
we just return the value as sampled at the window grid interval. For
block matching this flag is ignored.
**RETURNS**
A featureset of motion objects.
**EXAMPLES**
>>> cam = Camera()
>>> img1 = cam.getImage()
>>> img2 = cam.getImage()
>>> motion = img2.findMotion(img1)
>>> motion.draw()
>>> img2.show()
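The optical flow method and sample window can also be chosen explicitly (a sketch using the parameters documented above):
>>> motion = img2.findMotion(img1, window=15, method="LK")
>>> motion.draw()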
**SEE ALSO**
:py:class:`Motion`
:py:class:`FeatureSet`
"""
try:
import cv2
ver = cv2.__version__
#For OpenCV versions up to 2.4.0, cv2.__version__ is of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:') :
if int(ver.replace('.','0'))>=20400 :
FLAG_VER = 1
if (window > 9):
window = 9
else :
FLAG_VER = 0
except :
FLAG_VER = 0
if( self.width != previous_frame.width or self.height != previous_frame.height):
logger.warning("ImageClass.getMotion: To find motion the current and previous frames must match")
return None
fs = FeatureSet()
max_mag = 0.00
if( method == "LK" or method == "HS" ):
# create the result images.
xf = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
yf = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
win = (window,window)
if( method == "LK" ):
cv.CalcOpticalFlowLK(self._getGrayscaleBitmap(),previous_frame._getGrayscaleBitmap(),win,xf,yf)
else:
cv.CalcOpticalFlowHS(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),0,xf,yf,1.0,(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
w = math.floor((float(window))/2.0)
cx = ((self.width-window)/window)+1 #our sample rate
cy = ((self.height-window)/window)+1
vx = 0.00
vy = 0.00
for x in range(0,int(cx)): # go through our sample grid
for y in range(0,int(cy)):
xi = (x*window)+w # calculate the sample point
yi = (y*window)+w
if( aggregate ):
lowx = int(xi-w)
highx = int(xi+w)
lowy = int(yi-w)
highy = int(yi+w)
xderp = xf[lowy:highy,lowx:highx] # get the average x/y components in the output
yderp = yf[lowy:highy,lowx:highx]
vx = np.average(xderp)
vy = np.average(yderp)
else: # otherwise just sample
vx = xf[yi,xi]
vy = yf[yi,xi]
mag = (vx*vx)+(vy*vy)
if(mag > max_mag): # calculate the max magnitude for normalizing our vectors
max_mag = mag
fs.append(Motion(self,xi,yi,vx,vy,window)) # add the sample to the feature set
elif( method == "BM"):
# In the interest of keeping the parameter list short
# I am pegging these to the window size.
# For versions with OpenCV 2.4.0 and below.
if ( FLAG_VER==0):
block = (window,window) # block size
shift = (int(window*1.2),int(window*1.2)) # how far to shift the block
spread = (window*2,window*2) # the search windows.
wv = (self.width - block[0]) / shift[0] # the result image size
hv = (self.height - block[1]) / shift[1]
xf = cv.CreateMat(hv, wv, cv.CV_32FC1)
yf = cv.CreateMat(hv, wv, cv.CV_32FC1)
cv.CalcOpticalFlowBM(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),block,shift,spread,0,xf,yf)
#For versions with OpenCV 2.4.0 and above.
elif ( FLAG_VER==1) :
block = (window,window) # block size
shift = (int(window*0.2),int(window*0.2)) # how far to shift the block
spread = (window,window) # the search windows.
wv = self.width-block[0]+shift[0]
hv = self.height-block[1]+shift[1]
xf = cv.CreateImage((wv,hv), cv.IPL_DEPTH_32F, 1)
yf = cv.CreateImage((wv,hv), cv.IPL_DEPTH_32F, 1)
cv.CalcOpticalFlowBM(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),block,shift,spread,0,xf,yf)
for x in range(0,int(wv)): # go through the sample grid
for y in range(0,int(hv)):
xi = (shift[0]*(x))+block[0] #where on the input image the samples live
yi = (shift[1]*(y))+block[1]
vx = xf[y,x] # the result image values
vy = yf[y,x]
fs.append(Motion(self,xi,yi,vx,vy,window)) # add the feature
mag = (vx*vx)+(vy*vy) # compute the magnitude
if(mag > max_mag):
max_mag = mag
else:
logger.warning("ImageClass.findMotion: I don't know what algorithm you want to use. Valid method choices are Block Matching -> \"BM\" Horn-Schunck -> \"HS\" and Lucas-Kanade->\"LK\" ")
return None
max_mag = math.sqrt(max_mag) # do the normalization
for f in fs:
f.normalizeTo(max_mag)
return fs
def _generatePalette(self,bins,hue, centroids = None):
"""
**SUMMARY**
This is the main entry point for palette generation. A palette, for our purposes,
is a list of the main colors in an image. Creating a palette with 10 bins tries
to cluster the colors in RGB space into ten distinct groups. In hue space we only
look at the hue channel. All of the relevant palette data is cached in the image
class.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *centroids* - A list of tuples that are the initial k-means estimates. This is handy if you want consistent results from palettize.
**RETURNS**
Nothing, but creates the image's cached values for:
self._mDoHuePalette
self._mPaletteBins
self._mPalette
self._mPaletteMembers
self._mPalettePercentages
**EXAMPLE**
>>> img._generatePalette(bins=42)
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
they work in a one-dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
UserWarning: One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
ImageClass.getPalette(self,bins=10,hue=False)
ImageClass.rePalette(self,palette,hue=False):
ImageClass.drawPaletteColors(self,size=(-1,-1),horizontal=True,bins=10,hue=False)
ImageClass.palettize(self,bins=10,hue=False)
ImageClass.binarizeFromPalette(self, palette_selection)
ImageClass.findBlobsFromPalette(self, palette_selection, dilate = 0, minsize=5, maxsize=0)
"""
if( self._mPaletteBins != bins or
self._mDoHuePalette != hue ):
total = float(self.width*self.height)
percentages = []
result = None
if( not hue ):
pixels = np.array(self.getNumpy()).reshape(-1, 3) #reshape our matrix to 1xN
if( centroids == None ):
result = scv.kmeans(pixels,bins)
else:
if(isinstance(centroids,list)):
centroids = np.array(centroids,dtype='uint8')
result = scv.kmeans(pixels,centroids)
self._mPaletteMembers = scv.vq(pixels,result[0])[0]
else:
hsv = self
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
h = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),None,None,h,None)
mat = cv.GetMat(h)
pixels = np.array(mat).reshape(-1,1)
if( centroids == None ):
result = scv.kmeans(pixels,bins)
else:
if(isinstance( centroids,list)):
centroids = np.array( centroids,dtype='uint8')
centroids = centroids.reshape(centroids.shape[0],1)
result = scv.kmeans(pixels,centroids)
self._mPaletteMembers = scv.vq(pixels,result[0])[0]
for i in range(0,bins):
count = np.where(self._mPaletteMembers==i)
v = float(count[0].shape[0])/total
percentages.append(v)
self._mDoHuePalette = hue
self._mPaletteBins = bins
self._mPalette = np.array(result[0],dtype='uint8')
self._mPalettePercentages = percentages
def getPalette(self,bins=10,hue=False,centroids=None):
"""
**SUMMARY**
This method returns the colors in the palette of the image. A palette is the
set of the most common colors in an image. This method is helpful for segmentation.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *centroids* - A list of tuples that are the initial k-means estimates. This is handy if you want consistent results from palettize.
**RETURNS**
A numpy array of the BGR color tuples.
**EXAMPLE**
>>> p = img.getPalette(bins=42)
>>> print p[2]
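A hue-only palette is requested the same way (a sketch; the bin count is only illustrative):
>>> hp = img.getPalette(bins=16, hue=True)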
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
they work in a one-dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
self._generatePalette(bins,hue,centroids)
return self._mPalette
def rePalette(self,palette,hue=False):
"""
**SUMMARY**
rePalette takes in the palette from another image and attempts to apply it to this image.
This is helpful if you want to speed up the palette computation for a series of images (like those in a
video stream).
**PARAMETERS**
* *palette* - The pre-computed palette from another image.
* *hue* - Boolean Hue - if hue is True we use a hue palette, otherwise we use a BGR palette.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = Image("logo")
>>> p = img.getPalette()
>>> result = img2.rePalette(p)
>>> result.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
retVal = None
if(hue):
hsv = self
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
h = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),None,None,h,None)
mat = cv.GetMat(h)
pixels = np.array(mat).reshape(-1,1)
result = scv.vq(pixels,palette)
derp = palette[result[0]]
retVal = Image(derp[::-1].reshape(self.height,self.width)[::-1])
retVal = retVal.rotate(-90,fixed=False)
retVal._mDoHuePalette = True
retVal._mPaletteBins = len(palette)
retVal._mPalette = palette
retVal._mPaletteMembers = result[0]
else:
result = scv.vq(self.getNumpy().reshape(-1,3),palette)
retVal = Image(palette[result[0]].reshape(self.width,self.height,3))
retVal._mDoHuePalette = False
retVal._mPaletteBins = len(palette)
retVal._mPalette = palette
pixels = np.array(self.getNumpy()).reshape(-1, 3)
retVal._mPaletteMembers = scv.vq(pixels,palette)[0]
percentages = []
total = self.width*self.height
for i in range(0,len(palette)):
count = np.where(self._mPaletteMembers==i)
v = float(count[0].shape[0])/total
percentages.append(v)
self._mPalettePercentages = percentages
return retVal
def drawPaletteColors(self,size=(-1,-1),horizontal=True,bins=10,hue=False):
"""
**SUMMARY**
This method returns the visual representation (swatches) of the palette in an image. The palette
is oriented either horizontally or vertically, and each color is given an area
proportional to the number of pixels that have that color in the image. The palette
is arranged as it is returned from the clustering algorithm. When size is left
to its default value, the palette will match the image size along the chosen
orientation and be 10% of the other dimension. E.g. if our image is 640x480 the horizontal
palette will be 640x48 and likewise the vertical palette will be 64x480.
If a Hue palette is used this method will return a grayscale palette.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *size* - The size of the generated palette as a (width,height) tuple, if left default we select
a size based on the image so it can be nicely displayed with the
image.
* *horizontal* - If true we orientate our palette horizontally, otherwise vertically.
**RETURNS**
A palette swatch image.
**EXAMPLE**
>>> p = img1.drawPaletteColors()
>>> img2 = img1.sideBySide(p,side="bottom")
>>> img2.show()
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
they work in a one-dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
self._generatePalette(bins,hue)
retVal = None
if( not hue ):
if( horizontal ):
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width),int(self.height*.1))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+(self._mPalettePercentages[i]*float(size[0])),0,size[0]-1)
roi = (int(idxL),0,int(idxH-idxL),size[1])
cv.SetImageROI(pal,roi)
color = np.array((float(self._mPalette[i][2]),float(self._mPalette[i][1]),float(self._mPalette[i][0])))
cv.AddS(pal,color,pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else:
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width*.1),int(self.height))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+self._mPalettePercentages[i]*size[1],0,size[1]-1)
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
color = np.array((float(self._mPalette[i][2]),float(self._mPalette[i][1]),float(self._mPalette[i][0])))
cv.AddS(pal,color,pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else: # do hue
if( horizontal ):
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width),int(self.height*.1))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+(self._mPalettePercentages[i]*float(size[0])),0,size[0]-1)
roi = (int(idxL),0,int(idxH-idxL),size[1])
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else:
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width*.1),int(self.height))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+self._mPalettePercentages[i]*size[1],0,size[1]-1)
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
return retVal
def palettize(self,bins=10,hue=False,centroids=None):
"""
**SUMMARY**
This method analyzes an image and determines the most common colors using a k-means algorithm.
The method then goes through and replaces each pixel with the centroid of the clusters found
by k-means. This reduces the number of colors in an image to the number of bins. This can be particularly
handy for doing segmentation based on color.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
**RETURNS**
An image matching the original where each color is replaced with its palette value.
**EXAMPLE**
>>> img2 = img1.palettize()
>>> img2.show()
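A hue-based palettization works the same way (a sketch; the bin count is only illustrative):
>>> img3 = img1.palettize(bins=8, hue=True)
>>> img3.show()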
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
they work in a one-dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
UserWarning: One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
retVal = None
self._generatePalette(bins,hue,centroids)
if( hue ):
derp = self._mPalette[self._mPaletteMembers]
retVal = Image(derp[::-1].reshape(self.height,self.width)[::-1])
retVal = retVal.rotate(-90,fixed=False)
else:
retVal = Image(self._mPalette[self._mPaletteMembers].reshape(self.width,self.height,3))
return retVal
def findBlobsFromPalette(self, palette_selection, dilate = 0, minsize=5, maxsize=0,appx_level=3):
"""
**SUMMARY**
This method attempts to use palettization to do segmentation and behaves similarly to
findBlobs in that it returns a FeatureSet of blob objects. Once a palette has been
extracted using getPalette() we can then select colors from that palette to be labeled
white within our blobs.
**PARAMETERS**
* *palette_selection* - color triplets selected from our palette that will be turned into blobs.
These values can either be a 3xN numpy array, or a list of RGB triplets.
* *dilate* - the optional number of dilation operations to perform on the binary image
prior to performing blob extraction.
* *minsize* - the minimum blob size in pixels
* *maxsize* - the maximum blob size in pixels.
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
If the method executes successfully a FeatureSet of Blobs is returned from the image. If the method
fails a value of None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> blobs = img.findBlobsFromPalette( (p[0],p[1],p[6]) )
>>> blobs.draw()
>>> img.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
#we get the palette from getPalette
#ASSUME: GET PALETTE WAS CALLED!
bwimg = self.binarizeFromPalette(palette_selection)
if( dilate > 0 ):
bwimg =bwimg.dilate(dilate)
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(bwimg,
self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return blobs
def binarizeFromPalette(self, palette_selection):
"""
**SUMMARY**
This method uses the color palette to generate a binary (black and white) image. Palette selection
is a list of color tuples retrieved from img.getPalette(). The provided values will be drawn white
while other values will be black.
**PARAMETERS**
palette_selection - color triplets selected from our palette that will be drawn white.
These values can either be a 3xN numpy array, or a list of RGB triplets.
**RETURNS**
This method returns a black and white image, where colors that are close to the colors
in palette_selection are set to white
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> b = img.binarizeFromPalette( (p[0],p[1],p[6]) )
>>> b.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
#we get the palette from getPalette
#ASSUME: GET PALETTE WAS CALLED!
if( self._mPalette == None ):
logger.warning("Image.binarizeFromPalette: No palette exists, call getPalette())")
return None
retVal = None
img = self.palettize(self._mPaletteBins, hue=self._mDoHuePalette)
if( not self._mDoHuePalette ):
npimg = img.getNumpy()
white = np.array([255,255,255])
black = np.array([0,0,0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
else:
npimg = img.getNumpy()[:,:,1]
white = np.array([255])
black = np.array([0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
return retVal
def skeletonize(self, radius = 5):
"""
**SUMMARY**
Skeletonization is the process of taking in a set of blobs (here blobs are white
on a black background) and finding a squiggly line that would be the backbone of
the blobs were they some sort of vertebrate animal. Another way of thinking about
skeletonization is that it finds a series of lines that approximates a blob's shape.
A good summary can be found here:
http://www.inf.u-szeged.hu/~palagyi/skel/skel.html
**PARAMETERS**
* *radius* - an integer that defines roughly how wide a blob must be to be added
to the skeleton, lower values give more skeleton lines, higher values give
fewer skeleton lines.
**EXAMPLE**
>>> cam = Camera()
>>> while True:
>>> img = cam.getImage()
>>> b = img.binarize().invert()
>>> s = img.skeletonize()
>>> r = b-s
>>> r.show()
**NOTES**
This code was a suggested improvement by Alex Wiltchko, check out his awesome blog here:
http://alexbw.posterous.com/
"""
img = self.toGray().getNumpy()[:,:,0]
distance_img = ndimage.distance_transform_edt(img)
morph_laplace_img = ndimage.morphological_laplace(distance_img, (radius, radius))
skeleton = morph_laplace_img < morph_laplace_img.min()/2
retVal = np.zeros([self.width,self.height])
retVal[skeleton] = 255
return Image(retVal)
def smartThreshold(self, mask=None, rect=None):
"""
**SUMMARY**
smartThreshold uses a method called grabCut, also called graph cut, to
automagically generate a grayscale mask image. The dumb version of threshold
just uses color; smartThreshold looks at
both color and edges to find a blob. To work smartThreshold needs either a
rectangle that bounds the object you want to find, or a mask. If you use
a rectangle make sure it holds the complete object. In the case of a mask, it
need not be a normal binary mask, it can have the normal white foreground and black
background, but also light and dark gray values that correspond to areas
that are more likely to be foreground and more likely to be background. These
values can be found in the color class as Color.BACKGROUND, Color.FOREGROUND,
Color.MAYBE_BACKGROUND, and Color.MAYBE_FOREGROUND.
**PARAMETERS**
* *mask* - A grayscale mask the same size as the image using the 4 mask color values
* *rect* - A rectangle tuple of the form (x_position,y_position,width,height)
**RETURNS**
A grayscale image with the foreground / background values assigned to:
* BACKGROUND = (0,0,0)
* MAYBE_BACKGROUND = (64,64,64)
* MAYBE_FOREGROUND = (192,192,192)
* FOREGROUND = (255,255,255)
**EXAMPLE**
>>> img = Image("RatTop.png")
>>> mask = Image((img.width,img.height))
>>> mask.dl().circle((100,100),80,color=Color.MAYBE_BACKGROUND,filled=True)
>>> mask.dl().circle((100,100),60,color=Color.MAYBE_FOREGROUND,filled=True)
>>> mask.dl().circle((100,100),40,color=Color.FOREGROUND,filled=True)
>>> mask = mask.applyLayers()
>>> new_mask = img.smartThreshold(mask=mask)
>>> new_mask.show()
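The operation can also be seeded with a bounding rectangle instead of a mask (a sketch; the rectangle values are only illustrative and follow the (x_position,y_position,width,height) form documented above):
>>> new_mask = img.smartThreshold(rect=(50,50,100,100)) # illustrative bounding rectangle
>>> new_mask.show()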
**NOTES**
http://en.wikipedia.org/wiki/Graph_cuts_in_computer_vision
**SEE ALSO**
:py:meth:`smartFindBlobs`
"""
try:
import cv2
except:
logger.warning("Can't Do GrabCut without OpenCV >= 2.3.0")
return
retVal = []
if( mask is not None ):
bmp = mask._getGrayscaleBitmap()
# translate the human readable images to something opencv wants using a lut
LUT = np.zeros((256,1),dtype=uint8)
LUT[255]=1
LUT[64]=2
LUT[192]=3
cv.LUT(bmp,bmp,cv.fromarray(LUT))
mask_in = np.array(cv.GetMat(bmp))
# get our image in a flavor grab cut likes
npimg = np.array(cv.GetMat(self.getBitmap()))
# require by opencv
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
# do the algorithm
cv2.grabCut(npimg,mask_in,None,tmp1,tmp2,10,mode=cv2.GC_INIT_WITH_MASK)
# generate the output image
output = cv.CreateImageHeader((mask_in.shape[1],mask_in.shape[0]),cv.IPL_DEPTH_8U,1)
cv.SetData(output,mask_in.tostring(),mask_in.dtype.itemsize*mask_in.shape[1])
# remap the color space
LUT = np.zeros((256,1),dtype=uint8)
LUT[1]=255
LUT[2]=64
LUT[3]=192
cv.LUT(output,output,cv.fromarray(LUT))
# and create the return value
mask._graybitmap = None # don't ask me why... but this gets corrupted
retVal = Image(output)
elif ( rect is not None ):
npimg = np.array(cv.GetMat(self.getBitmap()))
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
mask = np.zeros((self.height,self.width),dtype='uint8')
cv2.grabCut(npimg,mask,rect,tmp1,tmp2,10,mode=cv2.GC_INIT_WITH_RECT)
bmp = cv.CreateImageHeader((mask.shape[1],mask.shape[0]),cv.IPL_DEPTH_8U,1)
cv.SetData(bmp,mask.tostring(),mask.dtype.itemsize*mask.shape[1])
LUT = np.zeros((256,1),dtype=uint8)
LUT[1]=255
LUT[2]=64
LUT[3]=192
cv.LUT(bmp,bmp,cv.fromarray(LUT))
retVal = Image(bmp)
else:
logger.warning( "ImageClass.findBlobsSmart requires either a mask or a selection rectangle. Failure to provide one of these causes your bytes to splinter and bit shrapnel to hit your pipeline making it asplode in a ball of fire. Okay... not really")
return retVal
def smartFindBlobs(self,mask=None,rect=None,thresh_level=2,appx_level=3):
"""
**SUMMARY**
smartFindBlobs uses a method called grabCut, also called graph cut, to
automagically determine the boundary of a blob in the image. The dumb find
blobs just uses a color threshold to find the boundary; smartFindBlobs looks at
both color and edges to find a blob. To work smartFindBlobs needs either a
rectangle that bounds the object you want to find, or a mask. If you use
a rectangle make sure it holds the complete object. In the case of a mask, it
need not be a normal binary mask, it can have the normal white foreground and black
background, but also light and dark gray values that correspond to areas
that are more likely to be foreground and more likely to be background. These
values can be found in the color class as Color.BACKGROUND, Color.FOREGROUND,
Color.MAYBE_BACKGROUND, and Color.MAYBE_FOREGROUND.
**PARAMETERS**
* *mask* - A grayscale mask the same size as the image using the 4 mask color values
* *rect* - A rectangle tuple of the form (x_position,y_position,width,height)
* *thresh_level* - This represents what grab cut values to use in the mask after the
graph cut algorithm is run:
* 1 - means use the foreground, maybe_foreground, and maybe_background values
* 2 - means use the foreground and maybe_foreground values.
* 3+ - means use just the foreground
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
A featureset of blobs. If everything went smoothly only a couple of blobs should
be present.
**EXAMPLE**
>>> img = Image("RatTop.png")
>>> mask = Image((img.width,img.height))
>>> mask.dl().circle((100,100),80,color=Color.MAYBE_BACKGROUND,filled=True)
>>> mask.dl().circle((100,100),60,color=Color.MAYBE_FOREGROUND,filled=True)
>>> mask.dl().circle((100,100),40,color=Color.FOREGROUND,filled=True)
>>> mask = mask.applyLayers()
>>> blobs = img.smartFindBlobs(mask=mask)
>>> blobs.draw()
>>> blobs.show()
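A bounding rectangle seed works here as well (a sketch; the rectangle values are only illustrative):
>>> blobs = img.smartFindBlobs(rect=(50,50,100,100)) # illustrative bounding rectangle
>>> blobs.draw()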
**NOTES**
http://en.wikipedia.org/wiki/Graph_cuts_in_computer_vision
**SEE ALSO**
:py:meth:`smartThreshold`
"""
result = self.smartThreshold(mask, rect)
binary = None
retVal = None
if result:
if( thresh_level == 1 ):
result = result.threshold(192)
elif( thresh_level == 2):
result = result.threshold(128)
elif( thresh_level > 2 ):
result = result.threshold(1)
bm = BlobMaker()
retVal = bm.extractFromBinary(result,self,appx_level=appx_level)
return retVal
def threshold(self, value):
"""
**SUMMARY**
We roll old school with this vanilla threshold function. It takes your image,
converts it to grayscale, and applies a threshold. Values above the threshold
are white, values below the threshold are black (note this is in contrast to
binarize... which is a stupid function that drives me up a wall). The resulting
black and white image is returned.
**PARAMETERS**
* *value* - the threshold, goes between 0 and 255.
**RETURNS**
A black and white SimpleCV image.
**EXAMPLE**
>>> img = Image("purplemonkeydishwasher.png")
>>> result = img.threshold(42)
**NOTES**
THRESHOLD RULES BINARIZE DROOLS!
**SEE ALSO**
:py:meth:`binarize`
"""
gray = self._getGrayscaleBitmap()
result = self.getEmpty(1)
cv.Threshold(gray, result, value, 255, cv.CV_THRESH_BINARY)
retVal = Image(result)
return retVal
def floodFill(self,points,tolerance=None,color=Color.WHITE,lower=None,upper=None,fixed_range=True):
"""
**SUMMARY**
FloodFill works just like ye olde paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill
* *tolerance* - The color tolerance as a single value or a triplet.
* *color* - The color to replace the floodFill pixels with
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
**RETURNS**
An Image where the values similar to the seed pixel have been replaced by the input color.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.floodFill(((10,10),(54,32)),tolerance=(10,10,10),color=Color.RED)
>>> img2.show()
**SEE ALSO**
:py:meth:`floodFillToMask`
:py:meth:`findFloodFillBlobs`
"""
if( isinstance(color,np.ndarray) ):
color = color.tolist()
elif( isinstance(color,dict) ):
color = (color['R'],color['G'],color['B'])
if( isinstance(points,tuple) ):
points = np.array(points)
# first we guess what the user wants to do
# if we get and int/float convert it to a tuple
if( upper is None and lower is None and tolerance is None ):
upper = (0,0,0)
lower = (0,0,0)
if( tolerance is not None and
(isinstance(tolerance,float) or isinstance(tolerance,int))):
tolerance = (int(tolerance),int(tolerance),int(tolerance))
if( lower is not None and
(isinstance(lower,float) or isinstance(lower, int)) ):
lower = (int(lower),int(lower),int(lower))
elif( lower is None ):
lower = tolerance
if( upper is not None and
(isinstance(upper,float) or isinstance(upper, int)) ):
upper = (int(upper),int(upper),int(upper))
elif( upper is None ):
upper = tolerance
if( isinstance(points,tuple) ):
points = np.array(points)
flags = 8
if( fixed_range ):
flags = flags+cv.CV_FLOODFILL_FIXED_RANGE
bmp = self.getEmpty()
cv.Copy(self.getBitmap(),bmp)
if( len(points.shape) != 1 ):
for p in points:
cv.FloodFill(bmp,tuple(p),color,lower,upper,flags)
else:
cv.FloodFill(bmp,tuple(points),color,lower,upper,flags)
retVal = Image(bmp)
return retVal
def floodFillToMask(self, points,tolerance=None,color=Color.WHITE,lower=None,upper=None,fixed_range=True,mask=None):
"""
**SUMMARY**
floodFillToMask works sort of like the paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
Unlike regular floodFill, floodFillToMask, will return a binary mask of your flood fill
operation. This is handy if you want to extract blobs from an area, or create a
selection from a region. The method takes in an optional mask. Non-zero values of the mask
act to block the flood fill operations. This is handy if you want to use an edge image
to "stop" the flood fill operation within a particular region.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill
* *tolerance* - The color tolerance as a single value or a triplet.
* *color* - The color to replace the floodFill pixels with
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
* *mask* - An optional mask image that can be used to control the flood fill operation.
The output of this function will include the mask data in the input mask.
**RETURNS**
An Image where the values similar to the seed pixel have been replaced by the input color.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.edges()
>>> mask= img.floodFillToMask(((10,10),(54,32)),tolerance=(10,10,10),mask=mask)
>>> mask.show()
**SEE ALSO**
:py:meth:`floodFill`
:py:meth:`findFloodFillBlobs`
"""
mask_flag = 255 # flag weirdness
if( isinstance(color,np.ndarray) ):
color = color.tolist()
elif( isinstance(color,dict) ):
color = (color['R'],color['G'],color['B'])
if( isinstance(points,tuple) ):
points = np.array(points)
# first we guess what the user wants to do
# if we get and int/float convert it to a tuple
if( upper is None and lower is None and tolerance is None ):
upper = (0,0,0)
lower = (0,0,0)
if( tolerance is not None and
(isinstance(tolerance,float) or isinstance(tolerance,int))):
tolerance = (int(tolerance),int(tolerance),int(tolerance))
if( lower is not None and
(isinstance(lower,float) or isinstance(lower, int)) ):
lower = (int(lower),int(lower),int(lower))
elif( lower is None ):
lower = tolerance
if( upper is not None and
(isinstance(upper,float) or isinstance(upper, int)) ):
upper = (int(upper),int(upper),int(upper))
elif( upper is None ):
upper = tolerance
if( isinstance(points,tuple) ):
points = np.array(points)
flags = (mask_flag << 8 )+8
if( fixed_range ):
flags = flags + cv.CV_FLOODFILL_FIXED_RANGE
localMask = None
#opencv wants a mask that is slightly larger
if( mask is None ):
localMask = cv.CreateImage((self.width+2,self.height+2), cv.IPL_DEPTH_8U, 1)
cv.Zero(localMask)
else:
localMask = mask.embiggen(size=(self.width+2,self.height+2))._getGrayscaleBitmap()
bmp = self.getEmpty()
cv.Copy(self.getBitmap(),bmp)
if( len(points.shape) != 1 ):
for p in points:
cv.FloodFill(bmp,tuple(p),color,lower,upper,flags,localMask)
else:
cv.FloodFill(bmp,tuple(points),color,lower,upper,flags,localMask)
retVal = Image(localMask)
retVal = retVal.crop(1,1,self.width,self.height)
return retVal
def findBlobsFromMask(self, mask,threshold=128, minsize=10, maxsize=0,appx_level=3 ):
"""
**SUMMARY**
This method acts like findBlobs, but it lets you specify blobs directly by
providing a mask image. The mask image must match the size of this image, and
the mask should have values > threshold where you want the blobs selected. This
method can be used with binarize, dialte, erode, floodFill, edges etc to
get really nice segmentation.
**PARAMETERS**
* *mask* - The mask image, areas lighter than threshold will be counted as blobs.
Mask should be the same size as this image.
* *threshold* - A single threshold value used when we binarize the mask.
* *minsize* - The minimum size of the returned blobs.
* *maxsize* - The maximum size of the returned blobs, if none is specified we peg
this to the image size.
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
A featureset of blobs. If no blobs are found None is returned.
**EXAMPLE**
>>> img = Image("Foo.png")
>>> mask = img.binarize().dilate(2)
>>> blobs = img.findBlobsFromMask(mask)
>>> blobs.show()
**SEE ALSO**
:py:meth:`findBlobs`
:py:meth:`binarize`
:py:meth:`threshold`
:py:meth:`dilate`
:py:meth:`erode`
"""
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
if( mask.width != self.width or mask.height != self.height ):
logger.warning("ImageClass.findBlobsFromMask - your mask does not match the size of your image")
return None
blobmaker = BlobMaker()
gray = mask._getGrayscaleBitmap()
result = mask.getEmpty(1)
cv.Threshold(gray, result, threshold, 255, cv.CV_THRESH_BINARY)
blobs = blobmaker.extractFromBinary(Image(result), self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def findFloodFillBlobs(self,points,tolerance=None,lower=None,upper=None,
fixed_range=True,minsize=30,maxsize=-1):
"""
**SUMMARY**
This method lets you use a flood fill operation and pipe the results to findBlobs. You provide
the points to seed floodFill and the rest is taken care of.
floodFill works just like ye olde paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill.
* *tolerance* - The color tolerance as a single value or a triplet.
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
* *minsize* - The minimum size of the returned blobs.
* *maxsize* - The maximum size of the returned blobs, if none is specified we peg
this to the image size.
**RETURNS**
A featureset of blobs. If no blobs are found None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> blerbs = img.findFloodFillBlobs(((10,10),(20,20),(30,30)),tolerance=30)
>>> blerbs.show()
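A hedged variant with explicit bounds and a size filter (the values are illustrative):
>>> blobs = img.findFloodFillBlobs(((10,10),),lower=5,upper=25,minsize=50)
>>> if blobs is not None:
>>>    blobs.draw()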
**SEE ALSO**
:py:meth:`findBlobs`
:py:meth:`floodFill`
"""
mask = self.floodFillToMask(points,tolerance,color=Color.WHITE,lower=lower,upper=upper,fixed_range=fixed_range)
return self.findBlobsFromMask(mask,minsize=minsize,maxsize=maxsize)
def _doDFT(self, grayscale=False):
"""
**SUMMARY**
This private method performs the discrete Fourier transform on an input image.
The transform can be applied to a single channel gray image or to each channel of the
image. Each channel generates a 64F 2 channel IPL image corresponding to the real
and imaginary components of the DFT. A list of these IPL images is then cached
in the private member variable _DFT.
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
nothing - but creates a locally cached list of IPL images corresponding to the real
and imaginary components of each channel.
**EXAMPLE**
>>> img = Image('logo.png')
>>> img._doDFT()
>>> img._DFT[0] # get the b channel Re/Im components
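A hedged sketch of the grayscale path (assumes the cache then holds a single Re/Im pair):
>>> img._doDFT(grayscale=True)
>>> len(img._DFT) # expect 1 for the grayscale transform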
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**TO DO**
This method really needs to convert the image to an optimal DFT size.
http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html#getoptimaldftsize
"""
if( grayscale and (len(self._DFT) == 0 or len(self._DFT) == 3)):
self._DFT = []
img = self._getGrayscaleBitmap()
width, height = cv.GetSize(img)
src = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
dst = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
data = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
cv.ConvertScale(img,data,1.0)
cv.Zero(blank)
cv.Merge(data,blank,None,None,src)
cv.Merge(data,blank,None,None,dst)
cv.DFT(src, dst, cv.CV_DXT_FORWARD)
self._DFT.append(dst)
elif( not grayscale and (len(self._DFT) < 2 )):
self._DFT = []
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None)
chans = [b,g,r]
width = self.width
height = self.height
data = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
src = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
for c in chans:
dst = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
cv.ConvertScale(c,data,1.0)
cv.Zero(blank)
cv.Merge(data,blank,None,None,src)
cv.Merge(data,blank,None,None,dst)
cv.DFT(src, dst, cv.CV_DXT_FORWARD)
self._DFT.append(dst)
def _getDFTClone(self,grayscale=False):
"""
**SUMMARY**
This method works just like _doDFT but returns a deep copy
of the resulting array which can be used in destructive operations.
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
A deep copy of the cached DFT real/imaginary image list.
**EXAMPLE**
>>> img = Image('logo.png')
>>> myDFT = img._getDFTClone()
>>> SomeCVFunc(myDFT[0])
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
ImageClass._doDFT()
"""
# this needs to be switched to the optimal
# DFT size for faster processing.
self._doDFT(grayscale)
retVal = []
if(grayscale):
gs = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_64F,2)
cv.Copy(self._DFT[0],gs)
retVal.append(gs)
else:
for img in self._DFT:
temp = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_64F,2)
cv.Copy(img,temp)
retVal.append(temp)
return retVal
def rawDFTImage(self,grayscale=False):
"""
**SUMMARY**
This method returns the **RAW** DFT transform of an image as a list of IPL Images.
Each result image is a two channel 64f image where the first channel is the real
component and the second channel is the imaginary component. If the operation
is performed on an RGB image and grayscale is False the result is a list of
these images of the form [b,g,r].
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
A list of the DFT images (see above). Note that this is a shallow copy operation.
**EXAMPLE**
>>> img = Image('logo.png')
>>> myDFT = img.rawDFTImage()
>>> for c in myDFT:
>>> #do some operation on the DFT
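A hedged concrete check that only inspects the cached transforms (no modification):
>>> print len(myDFT) # 3 for a color image, 1 if grayscale=True was passed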
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
self._doDFT(grayscale)
return self._DFT
def getDFTLogMagnitude(self,grayscale=False):
"""
**SUMMARY**
This method returns the log value of the magnitude image of the DFT transform. This
method is helpful for examining and comparing the results of DFT transforms. The log
component helps to "squish" the large floating point values into an image that can
be rendered easily.
In the image the low frequency components are in the corners of the image and the high
frequency components are in the center of the image.
**PARAMETERS**
* *grayscale* - if grayscale is True we perform the magnitude operation of the grayscale
image otherwise we perform the operation on each channel.
**RETURNS**
Returns a SimpleCV image corresponding to the log magnitude of the input image.
**EXAMPLE**
>>> img = Image("RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.lowPassFilter(img.width/10.0,img.height/10.0)
>>> lpf.getDFTLogMagnitude().show()
**NOTES**
* http://en.wikipedia.org/wiki/Discrete_Fourier_transform
* http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
dft = self._getDFTClone(grayscale)
chans = []
if( grayscale ):
chans = [self.getEmpty(1)]
else:
chans = [self.getEmpty(1),self.getEmpty(1),self.getEmpty(1)]
data = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(chans)):
cv.Split(dft[i],data,blank,None,None)
cv.Pow( data, data, 2.0)
cv.Pow( blank, blank, 2.0)
cv.Add( data, blank, data, None)
cv.Pow( data, data, 0.5 )
cv.AddS( data, cv.ScalarAll(1.0), data, None ) # 1 + Mag
cv.Log( data, data ) # log(1 + Mag)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
cv.Scale(data, data, 1.0/(max-min), 1.0*(-min)/(max-min))
cv.Mul(data,data,data,255.0)
cv.Convert(data,chans[i])
retVal = None
if( grayscale ):
retVal = Image(chans[0])
else:
retVal = self.getEmpty()
cv.Merge(chans[0],chans[1],chans[2],None,retVal)
retVal = Image(retVal)
return retVal
def _boundsFromPercentage(self, floatVal, bound):
return np.clip(int(floatVal*bound),0,bound)
def applyDFTFilter(self,flt,grayscale=False):
"""
**SUMMARY**
This function allows you to apply an arbitrary filter to the DFT of an image.
This filter takes in a gray scale image, whiter values are kept and black values
are rejected. In the DFT image, the lower frequency values are in the corners
of the image, while the higher frequency components are in the center. For example,
a low pass filter has white squares in the corners and is black everywhere else.
**PARAMETERS**
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
* *flt* - A grayscale filter image. The size of the filter must match the size of
the image.
**RETURNS**
A SimpleCV image after applying the filter.
**EXAMPLE**
>>> filter = Image("MyFilter.png")
>>> myImage = Image("MyImage.png")
>>> result = myImage.applyDFTFilter(filter)
>>> result.show()
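A minimal sketch of the low pass filter described above (assumptions: numpy is
available as np, and Image() accepts a numpy array, as in applyButterworthFilter
below; the 32 pixel corner size is illustrative):
>>> flt = np.zeros((myImage.height,myImage.width),dtype=np.uint8)
>>> flt[:32,:32] = 255 # white corner squares pass the low frequencies
>>> flt[:32,-32:] = 255
>>> flt[-32:,:32] = 255
>>> flt[-32:,-32:] = 255
>>> lowpassed = myImage.applyDFTFilter(Image(flt))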
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
**TODO**
Make this function support a separate filter image for each channel.
"""
if isinstance(flt, DFT):
filteredimage = flt.applyFilter(self, grayscale)
return filteredimage
if( flt.width != self.width or
flt.height != self.height ):
logger.warning("Image.applyDFTFilter - Your filter must match the size of the image")
return None
dft = []
if( grayscale ):
dft = self._getDFTClone(grayscale)
flt = flt._getGrayscaleBitmap()
flt64f = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_64F,1)
cv.ConvertScale(flt,flt64f,1.0)
finalFilt = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_64F,2)
cv.Merge(flt64f,flt64f,None,None,finalFilt)
for d in dft:
cv.MulSpectrums(d,finalFilt,d,0)
else: #break down the filter and then do each channel
dft = self._getDFTClone(grayscale)
flt = flt.getBitmap()
b = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
g = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
r = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
cv.Split(flt,b,g,r,None)
chans = [b,g,r]
for c in range(0,len(chans)):
flt64f = cv.CreateImage((chans[c].width,chans[c].height),cv.IPL_DEPTH_64F,1)
cv.ConvertScale(chans[c],flt64f,1.0)
finalFilt = cv.CreateImage((chans[c].width,chans[c].height),cv.IPL_DEPTH_64F,2)
cv.Merge(flt64f,flt64f,None,None,finalFilt)
cv.MulSpectrums(dft[c],finalFilt,dft[c],0)
return self._inverseDFT(dft)
def _boundsFromPercentage(self, floatVal, bound):
return np.clip(int(floatVal*(bound/2.00)),0,(bound/2))
def highPassFilter(self, xCutoff,yCutoff=None,grayscale=False):
"""
**SUMMARY**
This method applies a high pass DFT filter. This filter enhances
the high frequencies and removes the low frequency signals. This has
the effect of enhancing edges. The frequencies are defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoff* - The horizontal frequency at which we perform the cutoff. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero and one,
where zero is the constant component and 1 is the highest possible
frequency in the image.
* *yCutoff* - The cutoff frequencies in the y direction. If none are provided
we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> hpf = img.highPassFilter([0.2,0.1,0.2])
>>> hpf.show()
>>> hpf.getDFTLogMagnitude().show()
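A hedged grayscale variant (a single float cutoff is applied to all channels):
>>> hpf_gray = img.highPassFilter(0.2,grayscale=True)
>>> hpf_gray.show()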
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
* See: http://en.wikipedia.org/wiki/Ringing_(signal)
* See: http://en.wikipedia.org/wiki/High-pass_filter#Image
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoff,float) ):
xCutoff = [xCutoff,xCutoff,xCutoff]
if( isinstance(yCutoff,float) ):
yCutoff = [yCutoff,yCutoff,yCutoff]
if(yCutoff is None):
yCutoff = [xCutoff[0],xCutoff[1],xCutoff[2]]
for i in range(0,len(xCutoff)):
xCutoff[i] = self._boundsFromPercentage(xCutoff[i],self.width)
yCutoff[i] = self._boundsFromPercentage(yCutoff[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
cv.AddS(filter,255,filter) # make everything white
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoff[0],yCutoff[0]),(0,0,0),thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoff[0]),(xCutoff[0],h),(0,0,0),thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoff[0],0),(w,yCutoff[0]),(0,0,0),thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoff[0],h-yCutoff[0]),(w,h),(0,0,0),thickness=-1) #BR
else:
#I need to look into CVMERGE/SPLIT... I would really need to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
cv.AddS(filterB,255,filterB) # make everything white
cv.AddS(filterG,255,filterG) # make everything white
cv.AddS(filterR,255,filterR) # make everything white
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoff[i],yCutoff[i]),0,thickness=-1)
cv.Rectangle(f,(0,h-yCutoff[i]),(xCutoff[i],h),0,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],0),(w,yCutoff[i]),0,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],h-yCutoff[i]),(w,h),0,thickness=-1)
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
def lowPassFilter(self, xCutoff,yCutoff=None,grayscale=False):
"""
**SUMMARY**
This method applies a low pass DFT filter. This filter enhances
the low frequencies and removes the high frequency signals. This has
the effect of reducing noise. The frequencies are defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoff* - The horizontal frequency at which we perform the cutoff. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero and one,
where zero is the constant component and 1 is the highest possible
frequency in the image.
* *yCutoff* - The cutoff frequencies in the y direction. If none are provided
we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.lowPassFilter([0.2,0.2,0.05])
>>> lpf.show()
>>> lpf.getDFTLogMagnitude().show()
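A hedged variant with separate horizontal and vertical cutoffs (values illustrative):
>>> lpf2 = img.lowPassFilter(0.1,yCutoff=0.3)
>>> lpf2.show()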
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
See: http://en.wikipedia.org/wiki/Ringing_(signal)
See: http://en.wikipedia.org/wiki/Low-pass_filter
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoff,float) ):
xCutoff = [xCutoff,xCutoff,xCutoff]
if( isinstance(yCutoff,float) ):
yCutoff = [yCutoff,yCutoff,yCutoff]
if(yCutoff is None):
yCutoff = [xCutoff[0],xCutoff[1],xCutoff[2]]
for i in range(0,len(xCutoff)):
xCutoff[i] = self._boundsFromPercentage(xCutoff[i],self.width)
yCutoff[i] = self._boundsFromPercentage(yCutoff[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoff[0],yCutoff[0]),255,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoff[0]),(xCutoff[0],h),255,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoff[0],0),(w,yCutoff[0]),255,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoff[0],h-yCutoff[0]),(w,h),255,thickness=-1) #BR
else:
#I need to look into CVMERGE/SPLIT... I would really need to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoff[i],yCutoff[i]),255,thickness=-1)
cv.Rectangle(f,(0,h-yCutoff[i]),(xCutoff[i],h),255,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],0),(w,yCutoff[i]),255,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],h-yCutoff[i]),(w,h),255,thickness=-1)
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
#TODO: need to decide BGR or RGB
# ((rx_begin,ry_begin)(gx_begin,gy_begin)(bx_begin,by_begin))
# or (x,y)
def bandPassFilter(self, xCutoffLow, xCutoffHigh, yCutoffLow=None, yCutoffHigh=None,grayscale=False):
"""
**SUMMARY**
This method applies a simple band pass DFT filter. This filter enhances
a range of frequencies and removes all of the other frequencies. This allows
a user to precisely select a set of signals to display. The frequencies are
defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoffLow* - The horizontal frequency at which we perform the cutoff of the low
frequency signals. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero and one,
where zero is the constant component and 1 is the highest possible
frequency in the image.
* *xCutoffHigh* - The horizontal frequency at which we perform the cutoff of the high
frequency signals. Our filter passes signals between xCutoffLow and
xCutoffHigh. A separate frequency can be used for the b, g, and r
channels by providing a
list of values. The frequency is defined between zero and one,
where zero is the constant component and 1 is the highest possible
frequency in the image.
* *yCutoffLow* - The low frequency cutoff in the y direction. If none
are provided we use the same values as provided for x.
* *yCutoffHigh* - The high frequency cutoff in the y direction. If none
are provided we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.bandPassFilter([0.2,0.2,0.05],[0.3,0.3,0.2])
>>> lpf.show()
>>> lpf.getDFTLogMagnitude().show()
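A hedged grayscale variant passing the band between 10% and 30% of the spectrum:
>>> bpf = img.bandPassFilter(0.1,0.3,grayscale=True)
>>> bpf.show()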
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
See: http://en.wikipedia.org/wiki/Ringing_(signal)
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoffLow,float) ):
xCutoffLow = [xCutoffLow,xCutoffLow,xCutoffLow]
if( isinstance(yCutoffLow,float) ):
yCutoffLow = [yCutoffLow,yCutoffLow,yCutoffLow]
if( isinstance(xCutoffHigh,float) ):
xCutoffHigh = [xCutoffHigh,xCutoffHigh,xCutoffHigh]
if( isinstance(yCutoffHigh,float) ):
yCutoffHigh = [yCutoffHigh,yCutoffHigh,yCutoffHigh]
if(yCutoffLow is None):
yCutoffLow = [xCutoffLow[0],xCutoffLow[1],xCutoffLow[2]]
if(yCutoffHigh is None):
yCutoffHigh = [xCutoffHigh[0],xCutoffHigh[1],xCutoffHigh[2]]
for i in range(0,len(xCutoffLow)):
xCutoffLow[i] = self._boundsFromPercentage(xCutoffLow[i],self.width)
xCutoffHigh[i] = self._boundsFromPercentage(xCutoffHigh[i],self.width)
yCutoffHigh[i] = self._boundsFromPercentage(yCutoffHigh[i],self.height)
yCutoffLow[i] = self._boundsFromPercentage(yCutoffLow[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoffHigh[0],yCutoffHigh[0]),255,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoffHigh[0]),(xCutoffHigh[0],h),255,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoffHigh[0],0),(w,yCutoffHigh[0]),255,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoffHigh[0],h-yCutoffHigh[0]),(w,h),255,thickness=-1) #BR
cv.Rectangle(filter,(0,0),(xCutoffLow[0],yCutoffLow[0]),0,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoffLow[0]),(xCutoffLow[0],h),0,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoffLow[0],0),(w,yCutoffLow[0]),0,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoffLow[0],h-yCutoffLow[0]),(w,h),0,thickness=-1) #BR
else:
#I need to look into CVMERGE/SPLIT... I would really need to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoffHigh[i],yCutoffHigh[i]),255,thickness=-1) #TL
cv.Rectangle(f,(0,h-yCutoffHigh[i]),(xCutoffHigh[i],h),255,thickness=-1) #BL
cv.Rectangle(f,(w-xCutoffHigh[i],0),(w,yCutoffHigh[i]),255,thickness=-1) #TR
cv.Rectangle(f,(w-xCutoffHigh[i],h-yCutoffHigh[i]),(w,h),255,thickness=-1) #BR
cv.Rectangle(f,(0,0),(xCutoffLow[i],yCutoffLow[i]),0,thickness=-1) #TL
cv.Rectangle(f,(0,h-yCutoffLow[i]),(xCutoffLow[i],h),0,thickness=-1) #BL
cv.Rectangle(f,(w-xCutoffLow[i],0),(w,yCutoffLow[i]),0,thickness=-1) #TR
cv.Rectangle(f,(w-xCutoffLow[i],h-yCutoffLow[i]),(w,h),0,thickness=-1) #BR
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
def _inverseDFT(self,input):
"""
**SUMMARY**
**PARAMETERS**
**RETURNS**
**EXAMPLE**
NOTES:
SEE ALSO:
"""
# a destructive IDFT operation for internal calls
w = input[0].width
h = input[0].height
if( len(input) == 1 ):
cv.DFT(input[0], input[0], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
cv.Split(input[0],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0)
cv.Convert(data,result)
retVal = Image(result)
else: # DO RGB separately
results = []
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(input)):
cv.DFT(input[i], input[i], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Split( input[i],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0) # this may not be right
cv.Convert(data,result)
results.append(result)
retVal = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,3)
cv.Merge(results[0],results[1],results[2],None,retVal)
retVal = Image(retVal)
del input
return retVal
def InverseDFT(self, raw_dft_image):
"""
**SUMMARY**
This method provides a way of performing an inverse discrete Fourier transform
on a real/imaginary image pair and obtaining the result as a SimpleCV image. This
method is helpful if you wish to perform custom filter development.
**PARAMETERS**
* *raw_dft_image* - A list object with either one or three IPL images. Each image should
have a 64f depth and contain two channels (the real and the imaginary).
**RETURNS**
A simpleCV image.
**EXAMPLE**
Note that this is an example, I don't recommend doing this unless you know what
you are doing.
>>> raw = img.rawDFTImage()
>>> cv.SomeOperation(raw)
>>> result = img.InverseDFT(raw)
>>> result.show()
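A hedged round trip with no filtering, which should approximately reproduce the image:
>>> raw = img.rawDFTImage()
>>> roundtrip = img.InverseDFT(raw)
>>> roundtrip.show()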
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
input = []
w = raw_dft_image[0].width
h = raw_dft_image[0].height
if(len(raw_dft_image) == 1):
gs = cv.CreateImage((w,h),cv.IPL_DEPTH_64F,2)
cv.Copy(raw_dft_image[0],gs)
input.append(gs)
else:
for img in raw_dft_image:
temp = cv.CreateImage((w,h),cv.IPL_DEPTH_64F,2)
cv.Copy(img,temp)
input.append(temp)
if( len(input) == 1 ):
cv.DFT(input[0], input[0], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
cv.Split(input[0],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0)
cv.Convert(data,result)
retVal = Image(result)
else: # DO RGB separately
results = []
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(raw_dft_image)):
cv.DFT(input[i], input[i], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Split( input[i],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0) # this may not be right
cv.Convert(data,result)
results.append(result)
retVal = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,3)
cv.Merge(results[0],results[1],results[2],None,retVal)
retVal = Image(retVal)
return retVal
def applyButterworthFilter(self,dia=400,order=2,highpass=False,grayscale=False):
"""
**SUMMARY**
Creates a Butterworth filter of 64x64 pixels, resizes it to fit the
image, and applies the filter to the image in the DFT domain.
Returns the filtered image.
**PARAMETERS**
* *dia* - int Diameter of Butterworth low pass filter
* *order* - int Order of butterworth lowpass filter
* *highpass*: BOOL True: highpass filter, False: lowpass filter
* *grayscale*: BOOL
**EXAMPLE**
>>> im = Image("lenna")
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=True,grayscale=False)
Output image: http://i.imgur.com/5LS3e.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=False,grayscale=False)
Output img: http://i.imgur.com/QlCAY.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=True,grayscale=True)
Output img: http://i.imgur.com/BYYnp.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=False,grayscale=True)
Output img: http://i.imgur.com/BYYnp.png
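For reference, the kernel built in the body below follows the standard Butterworth
low pass response (a sketch of the formula, not additional behavior):
H(u,v) = intensity_scale / (1 + (D(u,v)/dia)**(2*order))
where D(u,v) is the distance from the center of the 64x64 kernel; for highpass the
filter is inverted as intensity_scale - H(u,v).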
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
#reimplemented with faster, vectorized filter kernel creation
w,h = self.size()
intensity_scale = 2**8 - 1 #for now 8-bit
sz_x = 64 #for now constant, symmetric
sz_y = 64 #for now constant, symmetric
x0 = sz_x/2.0 #for now, on center
y0 = sz_y/2.0 #for now, on center
#efficient "vectorized" computation
X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
D = np.sqrt((X-x0)**2+(Y-y0)**2)
flt = intensity_scale/(1.0 + (D/dia)**(order*2))
if highpass: #then invert the filter
flt = intensity_scale - flt
flt = Image(flt) #numpy arrays are in row-major form...doesn't matter for symmetric filter
flt_re = flt.resize(w,h)
img = self.applyDFTFilter(flt_re,grayscale)
return img
def applyGaussianFilter(self, dia=400, highpass=False, grayscale=False):
"""
**SUMMARY**
Creates a Gaussian filter of 64x64 pixels, resizes it to fit the
image, and applies the filter to the image in the DFT domain.
Returns the filtered image.
**PARAMETERS**
* *dia* - int - diameter of Gaussian filter
* *highpass*: BOOL True: highpass filter False: lowpass filter
* *grayscale*: BOOL
**EXAMPLE**
>>> im = Image("lenna")
>>> img = im.applyGaussianFilter(dia=400,highpass=True,grayscale=False)
Output image: http://i.imgur.com/DttJv.png
>>> img = im.applyGaussianFilter(dia=400,highpass=False,grayscale=False)
Output img: http://i.imgur.com/PWn4o.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyGaussianFilter(dia=400,highpass=True,grayscale=True)
Output img: http://i.imgur.com/9hX5J.png
>>> img = im.applyGaussianFilter(dia=400,highpass=False,grayscale=True)
Output img: http://i.imgur.com/MXI5T.png
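For reference, the kernel built in the body below follows a Gaussian response
(a sketch of the formula, not additional behavior):
H(u,v) = intensity_scale * exp(-0.5 * (D(u,v)/dia)**2)
where D(u,v) is the distance from the center of the 64x64 kernel; for highpass the
filter is inverted as intensity_scale - H(u,v).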
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
#reimplemented with faster, vectorized filter kernel creation
w,h = self.size()
intensity_scale = 2**8 - 1 #for now 8-bit
sz_x = 64 #for now constant, symmetric
sz_y = 64 #for now constant, symmetric
x0 = sz_x/2.0 #for now, on center
y0 = sz_y/2.0 #for now, on center
#efficient "vectorized" computation
X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
D = np.sqrt((X-x0)**2+(Y-y0)**2)
flt = intensity_scale*np.exp(-0.5*(D/dia)**2)
if highpass: #then invert the filter
flt = intensity_scale - flt
flt = Image(flt) #numpy arrays are in row-major form...doesn't matter for symmetric filter
flt_re = flt.resize(w,h)
img = self.applyDFTFilter(flt_re,grayscale)
return img
def applyUnsharpMask(self,boost=1,dia=400,grayscale=False):
"""
**SUMMARY**
This method applies unsharp masking or highboost filtering
to an image depending upon the boost value provided.
A Gaussian lowpass filter is applied to the image in the DFT domain.
A mask is created by subtracting the lowpass-filtered image from the original
image. The mask is then added back to the image to sharpen it.
unsharp masking => image + mask
highboost filtering => image + (boost)*mask
**PARAMETERS**
* *boost* - int boost = 1 => unsharp masking, boost > 1 => highboost filtering
* *dia* - int Diameter of Gaussian low pass filter
* *grayscale* - BOOL
**EXAMPLE**
Gaussian Filters:
>>> im = Image("lenna")
>>> img = im.applyUnsharpMask(2,grayscale=False) #highboost filtering
output image: http://i.imgur.com/A1pZf.png
>>> img = im.applyUnsharpMask(1,grayscale=False) #unsharp masking
output image: http://i.imgur.com/smCdL.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyUnsharpMask(2,grayscale=True) #highboost filtering
output image: http://i.imgur.com/VtGzl.png
>>> img = im.applyUnsharpMask(1,grayscale=True) #unsharp masking
output image: http://i.imgur.com/bywny.png
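In terms of the code below: mask = image - gaussianLowPass(image), and the result is
image + boost*mask, so boost=1 gives classic unsharp masking and boost>1 gives
highboost filtering.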
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if boost < 0:
print "boost >= 1"
return None
lpIm = self.applyGaussianFilter(dia=dia,grayscale=grayscale,highpass=False)
im = Image(self.getBitmap())
mask = im - lpIm
img = im
for i in range(boost):
img = img + mask
return img
def listHaarFeatures(self):
'''
This is used to list the built in features available for HaarCascade feature
detection. Just run this function as:
>>> img.listHaarFeatures()
Then use one of the file names returned as the input to the findHaarFeatures()
function. You should get a list of file names; more than likely it will include face.xml.
To use it, just call:
>>> img.findHaarFeatures('face.xml')
'''
features_directory = os.path.join(LAUNCH_PATH, 'Features','HaarCascades')
features = os.listdir(features_directory)
print features
def _CopyAvg(self, src, dst,roi, levels, levels_f, mode):
'''
Take the value in an ROI, calculate the average / peak hue
and then set the output image roi to the value.
'''
if( mode ): # get the peak hue for an area
h = src[roi[0]:roi[0]+roi[2],roi[1]:roi[1]+roi[3]].hueHistogram()
myHue = np.argmax(h)
C = (float(myHue),float(255),float(255),float(0))
cv.SetImageROI(dst,roi)
cv.AddS(dst,C,dst)
cv.ResetImageROI(dst)
else: # get the average value for an area optionally set levels
cv.SetImageROI(src.getBitmap(),roi)
cv.SetImageROI(dst,roi)
avg = cv.Avg(src.getBitmap())
avg = (float(avg[0]),float(avg[1]),float(avg[2]),0)
if(levels is not None):
avg = (int(avg[0]/levels)*levels_f,int(avg[1]/levels)*levels_f,int(avg[2]/levels)*levels_f,0)
cv.AddS(dst,avg,dst)
cv.ResetImageROI(src.getBitmap())
cv.ResetImageROI(dst)
def pixelize(self, block_size = 10, region = None, levels=None, doHue=False):
"""
**SUMMARY**
Pixelation blur, like the kind used to hide naughty bits on your favorite tv show.
**PARAMETERS**
* *block_size* - the blur block size in pixels; an integer gives a square blur, a tuple gives a rectangular one.
* *region* - do the blur in a region in format (x_position,y_position,width,height)
* *levels* - the number of levels per color channel. This makes the image look like an 8-bit video game.
* *doHue* - If this value is true we calculate the peak hue for the area, not the
average color for the area.
**RETURNS**
Returns the image with the pixelation blur applied.
**EXAMPLE**
>>> img = Image("lenna")
>>> result = img.pixelize( 16, (200,180,250,250), levels=4)
>>> result.show()
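A hedged variant with rectangular blocks and peak-hue fill (block sizes illustrative):
>>> hueblocks = img.pixelize((20,10),doHue=True)
>>> hueblocks.show()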
"""
if( isinstance(block_size, int) ):
block_size = (block_size,block_size)
retVal = self.getEmpty()
levels_f = 0.00
if( levels is not None ):
levels = 255/int(levels)
if(levels <= 1 ):
levels = 2
levels_f = float(levels)
if( region is not None ):
cv.Copy(self.getBitmap(), retVal)
cv.SetImageROI(retVal,region)
cv.Zero(retVal)
cv.ResetImageROI(retVal)
xs = region[0]
ys = region[1]
w = region[2]
h = region[3]
else:
xs = 0
ys = 0
w = self.width
h = self.height
#if( region is None ):
hc = w / block_size[0] #number of horizontal blocks
vc = h / block_size[1] #number of vertical blocks
#when we fit in the blocks, we're going to spread the round off
#over the edges 0->x_0, 0->y_0 and x_0+hc*block_size
x_lhs = int(np.ceil(float(w%block_size[0])/2.0)) # this is the starting point
y_lhs = int(np.ceil(float(h%block_size[1])/2.0))
x_rhs = int(np.floor(float(w%block_size[0])/2.0)) # this is the starting point
y_rhs = int(np.floor(float(h%block_size[1])/2.0))
x_0 = xs+x_lhs
y_0 = ys+y_lhs
x_f = (x_0+(block_size[0]*hc)) #this would be the end point
y_f = (y_0+(block_size[1]*vc))
for i in range(0,hc):
for j in range(0,vc):
xt = x_0+(block_size[0]*i)
yt = y_0+(block_size[1]*j)
roi = (xt,yt,block_size[0],block_size[1])
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( x_lhs > 0 ): # add a left strip
xt = xs
wt = x_lhs
ht = block_size[1]
for j in range(0,vc):
yt = y_0+(j*block_size[1])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( x_rhs > 0 ): # add a right strip
xt = (x_0+(block_size[0]*hc))
wt = x_rhs
ht = block_size[1]
for j in range(0,vc):
yt = y_0+(j*block_size[1])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( y_lhs > 0 ): # add a left strip
yt = ys
ht = y_lhs
wt = block_size[0]
for i in range(0,hc):
xt = x_0+(i*block_size[0])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( y_rhs > 0 ): # add a right strip
yt = (y_0+(block_size[1]*vc))
ht = y_rhs
wt = block_size[0]
for i in range(0,hc):
xt = x_0+(i*block_size[0])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
#now the corner cases
if(x_lhs > 0 and y_lhs > 0 ):
roi = (xs,ys,x_lhs,y_lhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_rhs > 0 and y_rhs > 0 ):
roi = (x_f,y_f,x_rhs,y_rhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_lhs > 0 and y_rhs > 0 ):
roi = (xs,y_f,x_lhs,y_rhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_rhs > 0 and y_lhs > 0 ):
roi = (x_f,ys,x_rhs,y_lhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(doHue):
cv.CvtColor(retVal,retVal,cv.CV_HSV2BGR)
return Image(retVal)
def anonymize(self, block_size=10, features=None, transform=None):
"""
**SUMMARY**
Anonymize, for additional privacy to images.
**PARAMETERS**
* *features* - A list with the Haar like feature cascades that should be matched.
* *block_size* - The size of the blocks for the pixelize function.
* *transform* - A function, to be applied to the regions matched instead of pixelize.
* This function must take two arguments: the image and the region it'll be applied to,
* as in region = (x, y, width, height).
**RETURNS**
Returns the image with matching regions pixelated.
**EXAMPLE**
>>> img = Image("lenna")
>>> anonymous = img.anonymize()
>>> anonymous.show()
>>> def my_function(img, region):
>>> x, y, width, height = region
>>> img = img.crop(x, y, width, height)
>>> return img
>>>
>>> img = Image("lenna")
>>> transformed = img.anonymize(transform = my_function)
"""
regions = []
if features is None:
regions.append(self.findHaarFeatures("face"))
regions.append(self.findHaarFeatures("profile"))
else:
for feature in features:
regions.append(self.findHaarFeatures(feature))
found = [f for f in regions if f is not None]
img = self.copy()
if found:
for feature_set in found:
for region in feature_set:
rect = (region.topLeftCorner()[0], region.topLeftCorner()[1],
region.width(), region.height())
if transform is None:
img = img.pixelize(block_size=block_size, region=rect)
else:
img = transform(img, rect)
return img
def fillHoles(self):
"""
**SUMMARY**
Fill holes on a binary image by closing the contours
**PARAMETERS**
* *img* - a binary image
**RETURNS**
The image with the holes filled
**EXAMPLE**
>>> img = Image("SimpleCV")
#todo Add noise and showcase the image
"""
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
# res = cv2.morphologyEx(self.getGrayNumpy(),cv2.MORPH_OPEN,kernel)
# return res
des = cv2.bitwise_not(self.getGrayNumpy())
contour,hier = cv2.findContours(des,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
cv2.drawContours(des,[cnt],0,255,-1)
gray = cv2.bitwise_not(des)
return gray
def edgeIntersections(self, pt0, pt1, width=1, canny1=0, canny2=100):
"""
**SUMMARY**
Find the outermost intersection of a line segment and the edge image and return
a list of the intersection points. If no intersections are found the method returns
an empty list.
**PARAMETERS**
* *pt0* - an (x,y) tuple of one point on the intersection line.
* *pt1* - an (x,y) tuple of the second point on the intersection line.
* *width* - the width of the line to use. This approach works better when
for cases where the edges on an object are not always closed
and may have holes.
* *canny1* - the lower bound of the Canny edge detector parameters.
* *canny2* - the upper bound of the Canny edge detector parameters.
**RETURNS**
A list of two (x,y) tuples or an empty list.
**EXAMPLE**
>>> img = Image("SimpleCV")
>>> a = (25,100)
>>> b = (225,110)
>>> pts = img.edgeIntersections(a,b,width=3)
>>> e = img.edges(0,100)
>>> e.drawLine(a,b,color=Color.RED)
>>> e.drawCircle(pts[0],10,color=Color.GREEN)
>>> e.drawCircle(pts[1],10,color=Color.GREEN)
>>> e.show()
img = Image("SimpleCV")
a = (25,100)
b = (225,100)
pts = img.edgeIntersections(a,b,width=3)
e = img.edges(0,100)
e.drawLine(a,b,color=Color.RED)
e.drawCircle(pts[0],10,color=Color.GREEN)
e.drawCircle(pts[1],10,color=Color.GREEN)
e.show()
"""
w = abs(pt0[0]-pt1[0])
h = abs(pt0[1]-pt1[1])
x = np.min([pt0[0],pt1[0]])
y = np.min([pt0[1],pt1[1]])
if( w <= 0 ):
w = width
x = np.clip(x-(width/2),0,x-(width/2))
if( h <= 0 ):
h = width
y = np.clip(y-(width/2),0,y-(width/2))
#got some corner cases to catch here
p0p = np.array([(pt0[0]-x,pt0[1]-y)])
p1p = np.array([(pt1[0]-x,pt1[1]-y)])
edges = self.crop(x,y,w,h)._getEdgeMap(canny1, canny2)
line = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,1)
cv.Zero(line)
cv.Line(line,((pt0[0]-x),(pt0[1]-y)),((pt1[0]-x),(pt1[1]-y)),cv.Scalar(255.00),width,8)
cv.Mul(line,edges,line)
intersections = uint8(np.array(cv.GetMat(line)).transpose())
(xs,ys) = np.where(intersections==255)
points = zip(xs,ys)
if(len(points)==0):
return [None,None]
A = np.argmin(spsd.cdist(p0p,points,'cityblock'))
B = np.argmin(spsd.cdist(p1p,points,'cityblock'))
ptA = (int(xs[A]+x),int(ys[A]+y))
ptB = (int(xs[B]+x),int(ys[B]+y))
# we might actually want this to be list of all the points
return [ptA, ptB]
def fitContour(self, initial_curve, window=(11,11), params=(0.1,0.1,0.1),doAppx=True,appx_level=1):
"""
**SUMMARY**
This method tries to fit a list of points to lines in the image. The list of points
is a list of (x,y) tuples that are near (i.e. within the window size of) the line
you want to fit in the image. This method works on a binary image such as the result of calling
edges().
This method is based on active contours. Please see this reference:
http://en.wikipedia.org/wiki/Active_contour_model
**PARAMETERS**
* *initial_curve* - region of the form [(x0,y0),(x1,y1)...] that are the initial conditions to fit.
* *window* - the search region around each initial point to look for a solution.
* *params* - The alpha, beta, and gamma parameters for the active contours
algorithm as a list [alpha,beta,gamma].
* *doAppx* - post process the snake into a polynomial approximation. Basically
this flag will clean up the output of the contour algorithm.
* *appx_level* - how much to approximate the snake, higher numbers mean more approximation.
**DISCUSSION**
THIS SECTION IS QUOTED FROM: http://users.ecs.soton.ac.uk/msn/book/new_demo/Snakes/
There are three components to the Energy Function:
* Continuity
* Curvature
* Image (Gradient)
Each Weighted by Specified Parameter:
Total Energy = Alpha*Continuity + Beta*Curvature + Gamma*Image
Choose different values dependent on Feature to extract:
* Set alpha high if there is a deceptive Image Gradient
* Set beta high if smooth edged Feature, low if sharp edges
* Set gamma high if contrast between Background and Feature is low
**RETURNS**
A list of (x,y) tuples that approximate the curve. If you do not use
approximation the list should be the same length as the input list length.
**EXAMPLE**
>>> img = Image("lenna")
>>> edges = img.edges(t1=120,t2=155)
>>> guess = [(311,284),(313,270),(320,259),(330,253),(347,245)]
>>> result = edges.fitContour(guess)
>>> img.drawPoints(guess,color=Color.RED)
>>> img.drawPoints(result,color=Color.GREEN)
>>> img.show()
"""
alpha = [params[0]]
beta= [params[1]]
gamma = [params[2]]
if( window[0]%2 == 0 ):
window = (window[0]+1,window[1])
logger.warn("Yo dawg, just a heads up, snakeFitPoints wants an odd window size. I fixed it for you, but you may want to take a look at your code.")
if( window[1]%2 == 0 ):
window = (window[0],window[1]+1)
logger.warn("Yo dawg, just a heads up, snakeFitPoints wants an odd window size. I fixed it for you, but you may want to take a look at your code.")
raw = cv.SnakeImage(self._getGrayscaleBitmap(),initial_curve,alpha,beta,gamma,window,(cv.CV_TERMCRIT_ITER,10,0.01))
if( doAppx ):
try:
import cv2
except:
logger.warning("Can't Do snakeFitPoints without OpenCV >= 2.3.0")
return
appx = cv2.approxPolyDP(np.array([raw],'float32'),appx_level,True)
retVal = []
for p in appx:
retVal.append((int(p[0][0]),int(p[0][1])))
else:
retVal = raw
return retVal
def fitEdge(self,guess,window=10,threshold=128, measurements=5, darktolight=True, lighttodark=True,departurethreshold=1):
"""
**SUMMARY**
Fit edge in a binary/gray image using an initial guess and the least squares method.
The function returns a single line
**PARAMETERS**
* *guess* - A tuples of the form ((x0,y0),(x1,y1)) which is an approximate guess
* *window* - A window around the guess to search.
* *threshold* - the threshold above which we count a pixel as a line
* *measurements* -the number of line projections to use for fitting the line
TODO: Constrict a line to black to white or white to black
Right vs. Left orientation.
**RETURNS**
A line object
**EXAMPLE**
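A hedged sketch (the guess endpoints, window, and measurement count are illustrative):
>>> img = Image("lsq.png")
>>> line,searchLines,fitPts = img.fitEdge(((50,60),(200,65)),window=10,measurements=7)
>>> line.draw(color=Color.RED)
>>> img.show()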
"""
searchLines = FeatureSet()
fitPoints = FeatureSet()
x1 = guess[0][0]
x2 = guess[1][0]
y1 = guess[0][1]
y2 = guess[1][1]
dx = float((x2-x1))/(measurements-1)
dy = float((y2-y1))/(measurements-1)
s = np.zeros((measurements,2))
lpstartx = np.zeros(measurements)
lpstarty = np.zeros(measurements)
lpendx = np.zeros(measurements)
lpendy = np.zeros(measurements)
linefitpts = np.zeros((measurements,2))
#obtain equation for initial guess line
if( x1==x2): #vertical line must be handled as special case since slope isn't defined
m=0
mo = 0
b = x1
for i in xrange(0, measurements):
s[i][0] = x1
s[i][1] = y1 + i * dy
lpstartx[i] = s[i][0] + window
lpstarty[i] = s[i][1]
lpendx[i] = s[i][0] - window
lpendy[i] = s[i][1]
Cur_line = Line(self,((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])))
((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])) = Cur_line.cropToImageEdges().end_points
searchLines.append(Cur_line)
tmp = self.getThresholdCrossing((int(lpstartx[i]),int(lpstarty[i])),(int(lpendx[i]),int(lpendy[i])),threshold=threshold,lighttodark=lighttodark, darktolight=darktolight, departurethreshold=departurethreshold)
fitPoints.append(Circle(self,tmp[0],tmp[1],3))
linefitpts[i] = tmp
else:
m = float((y2-y1))/(x2-x1)
b = y1 - m*x1
mo = -1/m #slope of orthogonal line segments
#obtain points for measurement along the initial guess line
for i in xrange(0, measurements):
s[i][0] = x1 + i * dx
s[i][1] = y1 + i * dy
fx = (math.sqrt(math.pow(window,2))/(1+mo))/2
fy = fx * mo
lpstartx[i] = s[i][0] + fx
lpstarty[i] = s[i][1] + fy
lpendx[i] = s[i][0] - fx
lpendy[i] = s[i][1] - fy
Cur_line = Line(self,((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])))
((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])) = Cur_line.cropToImageEdges().end_points
searchLines.append(Cur_line)
tmp = self.getThresholdCrossing((int(lpstartx[i]),int(lpstarty[i])),(int(lpendx[i]),int(lpendy[i])),threshold=threshold,lighttodark=lighttodark, darktolight=darktolight,departurethreshold=departurethreshold)
fitPoints.append((tmp[0],tmp[1]))
linefitpts[i] = tmp
badpts = []
for j in range(len(linefitpts)):
if (linefitpts[j,0] == -1) or (linefitpts[j,1] == -1):
badpts.append(j)
for pt in badpts:
linefitpts = np.delete(linefitpts,pt,axis=0)
x = linefitpts[:,0]
y = linefitpts[:,1]
ymin = np.min(y)
ymax = np.max(y)
xmax = np.max(x)
xmin = np.min(x)
if( (xmax-xmin) > (ymax-ymin) ):
# do the least squares
A = np.vstack([x,np.ones(len(x))]).T
m,c = nla.lstsq(A,y)[0]
y0 = int(m*xmin+c)
y1 = int(m*xmax+c)
finalLine = Line(self,((xmin,y0),(xmax,y1)))
else:
# do the least squares
A = np.vstack([y,np.ones(len(y))]).T
m,c = nla.lstsq(A,x)[0]
x0 = int(ymin*m+c)
x1 = int(ymax*m+c)
finalLine = Line(self,((x0,ymin),(x1,ymax)))
return finalLine, searchLines, fitPoints
def getThresholdCrossing(self, pt1, pt2, threshold=128, darktolight=True, lighttodark=True, departurethreshold=1):
"""
**SUMMARY**
This function takes in an image and two points, calculates the intensity
profile between the points, and returns the single point at which the profile
crosses the given intensity threshold.
**PARAMETERS**
* *p1, p2* - the starting and ending points in tuple form e.g. (1,2)
* *threshold* pixel value of desired threshold crossing
* *departurethreshold* - noise reduction technique. requires this many points to be above the threshold to trigger crossing
**RETURNS**
An (x,y) tuple of the first threshold crossing, or (-1,-1) if no crossing is found.
**EXAMPLE**
>>> img = Image("lenna")
>>> pt = img.getThresholdCrossing((10,10),(100,100),threshold=128)
>>> print pt
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
linearr = self.getDiagonalScanlineGrey(pt1,pt2)
ind = 0
crossing = -1
if departurethreshold==1:
while ind < linearr.size-1:
if darktolight:
if linearr[ind] <=threshold and linearr[ind+1] > threshold:
crossing = ind
break
if lighttodark:
if linearr[ind] >= threshold and linearr[ind+1] < threshold:
crossing = ind
break
ind = ind +1
if crossing != -1:
xind = pt1[0] + int(round((pt2[0]-pt1[0])*crossing/linearr.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*crossing/linearr.size))
retVal = (xind,yind)
else:
retVal = (-1,-1)
#print 'Edgepoint not found.'
else:
while ind < linearr.size-(departurethreshold+1):
if darktolight:
if linearr[ind] <=threshold and (linearr[ind+1:ind+1+departurethreshold] > threshold).all():
crossing = ind
break
if lighttodark:
if linearr[ind] >= threshold and (linearr[ind+1:ind+1+departurethreshold] < threshold).all():
crossing = ind
break
ind = ind +1
if crossing != -1:
xind = pt1[0] + int(round((pt2[0]-pt1[0])*crossing/linearr.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*crossing/linearr.size))
retVal = (xind,yind)
else:
retVal = (-1,-1)
#print 'Edgepoint not found.'
return retVal
def getDiagonalScanlineGrey(self, pt1, pt2):
"""
**SUMMARY**
This function returns a single line of greyscale values from the image.
TODO: speed improvements and RGB tolerance
**PARAMETERS**
* *pt1, pt2* - the starting and ending points in tuple form e.g. (1,2)
**RETURNS**
An array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> sl = img.getDiagonalScanlineGrey((100,200),(300,400))
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
if not self.isGray():
self = self.toGray()
#self = self._getGrayscaleBitmap()
width = round(math.sqrt(math.pow(pt2[0]-pt1[0],2) + math.pow(pt2[1]-pt1[1],2)))
retVal = np.zeros(width)
for x in range(0, retVal.size):
xind = pt1[0] + int(round((pt2[0]-pt1[0])*x/retVal.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*x/retVal.size))
current_pixel = self.getPixel(xind,yind)
retVal[x] = current_pixel[0]
return retVal
def fitLines(self,guesses,window=10,threshold=128):
"""
**SUMMARY**
Fit lines in a binary/gray image using an initial guess and the least squares method.
The lines are returned as a line feature set.
**PARAMETERS**
* *guesses* - A list of tuples of the form ((x0,y0),(x1,y1)) where each of the lines
is an approximate guess.
* *window* - A window around the guess to search.
* *threshold* - the threshold above which we count a pixel as a line
**RETURNS**
A feature set of line features, one per guess.
**EXAMPLE**
>>> img = Image("lsq.png")
>>> guesses = [((313,150),(312,332)),((62,172),(252,52)),((102,372),(182,182)),((372,62),(572,162)),((542,362),(462,182)),((232,412),(462,423))]
>>> l = img.fitLines(guesses,window=10)
>>> l.draw(color=Color.RED,width=3)
>>> for g in guesses:
>>> img.drawLine(g[0],g[1],color=Color.YELLOW)
>>> img.show()
"""
retVal = FeatureSet()
i =0
for g in guesses:
# Guess the size of the crop region from the line guess and the window.
ymin = np.min([g[0][1],g[1][1]])
ymax = np.max([g[0][1],g[1][1]])
xmin = np.min([g[0][0],g[1][0]])
xmax = np.max([g[0][0],g[1][0]])
xminW = np.clip(xmin-window,0,self.width)
xmaxW = np.clip(xmax+window,0,self.width)
yminW = np.clip(ymin-window,0,self.height)
ymaxW = np.clip(ymax+window,0,self.height)
temp = self.crop(xminW,yminW,xmaxW-xminW,ymaxW-yminW)
temp = temp.getGrayNumpy()
# pick the lines above our threshold
x,y = np.where(temp>threshold)
pts = zip(x,y)
gpv = np.array([float(g[0][0]-xminW),float(g[0][1]-yminW)])
gpw = np.array([float(g[1][0]-xminW),float(g[1][1]-yminW)])
def lineSegmentToPoint(p):
w = gpw
v = gpv
#print w,v
p = np.array([float(p[0]),float(p[1])])
l2 = np.sum((w-v)**2)
t = float(np.dot((p-v),(w-v))) / float(l2)
if( t < 0.00 ):
return np.sqrt(np.sum((p-v)**2))
elif(t > 1.0):
return np.sqrt(np.sum((p-w)**2))
else:
project = v + (t*(w-v))
return np.sqrt(np.sum((p-project)**2))
# http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
distances = np.array(map(lineSegmentToPoint,pts))
closepoints = np.where(distances<window)[0]
pts = np.array(pts)
if( len(closepoints) < 3 ):
continue
good_pts = pts[closepoints]
good_pts = good_pts.astype(float)
x = good_pts[:,0]
y = good_pts[:,1]
# do the shift from our crop
# generate the line values
x = x + xminW
y = y + yminW
ymin = np.min(y)
ymax = np.max(y)
xmax = np.max(x)
xmin = np.min(x)
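# Fit y = m*x + c when the points span mostly horizontally, and x = m*y + c
# when they span mostly vertically, so near-vertical lines keep a finite slope.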
if( (xmax-xmin) > (ymax-ymin) ):
# do the least squares
A = np.vstack([x,np.ones(len(x))]).T
m,c = nla.lstsq(A,y)[0]
y0 = int(m*xmin+c)
y1 = int(m*xmax+c)
retVal.append(Line(self,((xmin,y0),(xmax,y1))))
else:
# do the least squares
A = np.vstack([y,np.ones(len(y))]).T
m,c = nla.lstsq(A,x)[0]
x0 = int(ymin*m+c)
x1 = int(ymax*m+c)
retVal.append(Line(self,((x0,ymin),(x1,ymax))))
return retVal
def fitLinePoints(self,guesses,window=(11,11), samples=20,params=(0.1,0.1,0.1)):
"""
**DESCRIPTION**
This method uses the snakes / active contour approach in an attempt to
fit a series of points to a line that may or may not be exactly linear.
**PARAMETERS**
* *guesses* - A set of lines that we wish to fit to. The lines are specified
as a list of tuples of (x,y) tuples. E.g. [((x0,y0),(x1,y1))....]
* *window* - The search window in pixels for the active contours approach.
* *samples* - The number of points to sample along the input line,
these are the initial conditions for active contours method.
* *params* - the alpha, beta, and gamma values for the active contours routine.
**RETURNS**
A list of fitted contour points. Each contour is a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lsq.png")
>>> guesses = [((313,150),(312,332)),((62,172),(252,52)),((102,372),(182,182)),((372,62),(572,162)),((542,362),(462,182)),((232,412),(462,423))]
>>> r = img.fitLinePoints(guesses)
>>> for rr in r:
>>> img.drawLine(rr[0],rr[1],color=Color.RED,width=3)
>>> for g in guesses:
>>> img.drawLine(g[0],g[1],color=Color.YELLOW)
>>> img.show()
"""
pts = []
for g in guesses:
#generate the approximation
bestGuess = []
dx = float(g[1][0]-g[0][0])
dy = float(g[1][1]-g[0][1])
l = np.sqrt((dx*dx)+(dy*dy))
if( l <= 0 ):
logger.warning("fitLinePoints: line guess has zero length, cannot fit points.")
return
dx = dx/l
dy = dy/l
for i in range(-1,samples+1):
t = i*(l/samples)
bestGuess.append((int(g[0][0]+(t*dx)),int(g[0][1]+(t*dy))))
# do the snake fitting
appx = self.fitContour(bestGuess,window=window,params=params,doAppx=False)
pts.append(appx)
return pts
def drawPoints(self, pts, color=Color.RED, sz=3, width=-1):
"""
**DESCRIPTION**
A quick and dirty points rendering routine.
**PARAMETERS**
* *pts* - pts a list of (x,y) points.
* *color* - a color for our points.
* *sz* - the circle radius for our points.
* *width* - if -1 fill the point, otherwise the size of point border
**RETURNS**
None - This is an inplace operation.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawPoints([(10,10),(30,30)])
>>> img.show()
"""
for p in pts:
self.drawCircle(p,sz,color,width)
return None
def sobel(self, xorder=1, yorder=1, doGray=True, aperture=5, aperature=None):
"""
**DESCRIPTION**
Sobel operator for edge detection
**PARAMETERS**
* *xorder* - int - Order of the derivative x.
* *yorder* - int - Order of the derivative y.
* *doGray* - Bool - if True apply the Sobel operator to the grayscale image, otherwise apply it to each color channel.
* *aperture* - int - Size of the extended Sobel kernel. It must be 1, 3, 5, or 7.
**RETURNS**
Image with the Sobel operator applied to it.
**EXAMPLE**
>>> img = Image("lenna")
>>> s = img.sobel()
>>> s.show()
"""
aperture = aperature if aperature else aperture
retVal = None
try:
import cv2
except:
logger.warning("Can't do Sobel without OpenCV >= 2.3.0")
return None
if( aperture != 1 and aperture != 3 and aperture != 5 and aperture != 7 ):
logger.warning("Bad Sobel Aperture, values are [1,3,5,7].")
return None
if( doGray ):
dst = cv2.Sobel(self.getGrayNumpy(),cv2.cv.CV_32F,xorder,yorder,ksize=aperture)
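# Rescale the signed 32-bit Sobel response into the 0-255 range before
# converting back to an 8-bit image.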
minv = np.min(dst)
maxv = np.max(dst)
cscale = 255/(maxv-minv)
shift = -1*(minv)
t = np.zeros(self.size(),dtype='uint8')
t = cv2.convertScaleAbs(dst,t,cscale,shift/255.0)
retVal = Image(t)
else:
layers = self.splitChannels(grayscale=False)
sobel_layers = []
for layer in layers:
dst = cv2.Sobel(layer.getGrayNumpy(),cv2.cv.CV_32F,xorder,yorder,ksize=aperture)
minv = np.min(dst)
maxv = np.max(dst)
cscale = 255/(maxv-minv)
shift = -1*(minv)
t = np.zeros(self.size(),dtype='uint8')
t = cv2.convertScaleAbs(dst,t,cscale,shift/255.0)
sobel_layers.append(Image(t))
b,g,r = sobel_layers
retVal = self.mergeChannels(b,g,r)
return retVal
def track(self, method="CAMShift", ts=None, img=None, bb=None, **kwargs):
"""
**DESCRIPTION**
Tracking the object surrounded by the bounding box in the given
image or TrackSet.
**PARAMETERS**
* *method* - str - The Tracking Algorithm to be applied
* *ts* - TrackSet - SimpleCV.Features.TrackSet.
* *img* - Image - Image to be tracked or list - List of Images to be tracked.
* *bb* - tuple - Bounding Box tuple (x, y, w, h)
**Optional Parameters**
*CAMShift*
CAMShift Tracker is based on the mean shift algorithm, which is
combined with an adaptive region-sizing step. The histogram is calculated based
on the mask provided. If a mask is not provided, the HSV transformed image of the
provided image is thresholded using the inRange function (band thresholding).
Lower and upper HSV values are used in the inRange function. If the user doesn't
provide any range values, default range values are used.
The histogram is back projected using previous images to get an appropriate image
and it is passed to the camshift function to find the object in the image. Users can
decide the number of images to be used in back projection by providing num_frames.
lower - Lower HSV value for inRange thresholding. tuple of (H, S, V). Default : (0, 60, 32)
upper - Upper HSV value for inRange thresholding. tuple of (H, S, V). Default: (180, 255, 255)
mask - Mask to calculate Histogram. It's better if you don't provide one. Default: calculated using above thresholding ranges.
num_frames - number of frames to be backtracked. Default: 40
*LK*
LK Tracker is based on Optical Flow method. In brief, optical flow can be
defined as the apparent motion of objects caused by the relative motion between
an observer and the scene. (Wikipedia).
LK Tracker first finds some good feature points in the given bounding box in the image.
These are the tracker points. In consecutive frames, optical flow of these feature points
is calculated. Users can limit the number of feature points by providing maxCorners and
qualityLevel. The number of features will always be less than maxCorners. These feature points
are calculated using the Harris corner detector. It returns a matrix with each pixel having
some quality value. Only good features are used based upon the qualityLevel provided. Better
features have a better quality measure and hence are more suitable to track.
Users can set minimum distance between each features by providing minDistance.
LK tracker finds optical flow using a number of pyramids and users can set this number by
providing maxLevel and users can set size of the search window for Optical Flow by setting
winSize.
docs from http://docs.opencv.org/
maxCorners - Maximum number of corners to return in goodFeaturesToTrack. If there are more corners than are found, the strongest of them is returned. Default: 4000
qualityLevel - Parameter characterizing the minimal accepted quality of image corners. The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue or the Harris function response. The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure less than 15 are rejected. Default: 0.08
minDistance - Minimum possible Euclidean distance between the returned corners. Default: 2
blockSize - Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. Default: 3
winSize - size of the search window at each pyramid level. Default: (10, 10)
maxLevel - 0-based maximal pyramid level number; if set to 0, pyramids are not used (single level); if set to 1, two levels are used, and so on. Default: 10
*SURF*
SURF based tracker finds keypoints in the template and computes the descriptor. The template is
chosen based on the bounding box provided with the first image. The image is cropped and stored
as template. SURF keypoints are found and descriptor is computed for the template and stored.
SURF keypoints are found in the image and its descriptor is computed. Image keypoints and template
keypoints are matched using K-nearest neighbor algorithm. Matched keypoints are filtered according
to the knn distance of the points. Users can set this criteria by setting distance.
Density Based Clustering algorithm (DBSCAN) is applied on the matched keypoints to filter out points
that are in the background. DBSCAN creates a cluster of object points and background points. These background
points are discarded. Users can set certain parameters for DBSCAN which are listed below.
K-means is applied on matched KeyPoints with k=1 to find the center of the cluster and then bounding
box is predicted based upon the position of all the object KeyPoints.
eps_val - eps for DBSCAN. The maximum distance between two samples for them to be considered as in the same neighborhood. default: 0.69
min_samples - min number of samples in DBSCAN. The number of samples in a neighborhood for a point to be considered as a core point. default: 5
distance - thresholding KNN distance of each feature. if KNN distance > distance, point is discarded. default: 100
*MFTrack*
Median Flow tracker is similar to LK tracker (based on Optical Flow), but it's more advanced, better and
faster.
In MFTrack, tracking points are decided based upon the number of horizontal and vertical points and window
size provided by the user. Unlike LK Tracker, good features are not found which saves a huge amount of time.
feature points are selected symmetrically in the bounding box.
Total number of feature points to be tracked = numM * numN.
If the width and height of the bounding box are 200 and 100 respectively, and numM = 10 and numN = 10,
there will be 10 equally spaced points (10 points in 200 pixels) in each row and 10 equally spaced
points (10 points in 100 pixels) in each column. So the total number of tracking points = 100.
numM > 0
numN > 0 (the two need not be equal)
Users can provide a margin around the bounding box that will be considered to place feature points and
calculate optical flow.
Optical flow is calculated from frame1 to frame2 and from frame2 to frame1. There might be some points
which give inaccurate optical flow, to eliminate these points the above method is used. It is called
forward-backward error tracking. The Optical Flow search window size can be set using winsize_lk.
For each point, the comparison is done based on the square area around it.
The length of the square window can be set using winsize.
numM - Number of points to be tracked in the bounding box
in height direction.
default: 10
numN - Number of points to be tracked in the bounding box
in width direction.
default: 10
margin - Margin around the bounding box.
default: 5
winsize_lk - Optical Flow search window size.
default: 4
winsize - Size of quadratic area around the point which is compared.
default: 10
Available Tracking Methods
- CamShift
- LK
- SURF
- MFTrack
**RETURNS**
SimpleCV.Features.TrackSet
Returns a TrackSet with all the necessary attributes.
**HOW TO**
>>> ts = img.track("camshift", img=img1, bb=bb)
Here TrackSet is returned. All the necessary attributes will be included in the trackset.
After getting the trackset you need not provide the bounding box or image. You provide TrackSet as parameter to track().
Bounding box and image will be taken from the trackset.
So now
>>> ts = new_img.track("camshift",ts)
The new Tracking feature will be appended to the given trackset and that will be returned.
So, to use it in a loop::
img = cam.getImage()
bb = (img.width/4,img.height/4,img.width/4,img.height/4)
ts = img.track(img=img, bb=bb)
while (True):
img = cam.getImage()
ts = img.track("camshift", ts=ts)
ts = []
while (some_condition_here):
img = cam.getImage()
ts = img.track("camshift",ts,img0,bb)
In the first loop iteration, since ts is empty, img0 and bb will be used.
A new tracking object will be created and added to ts (TrackSet).
After the first iteration, ts is not empty, so the previous
image frame and bounding box will be taken from ts, and img0
and bb will be ignored.
# Instead of loop, give a list of images to be tracked.
ts = []
imgs = [img1, img2, img3, ..., imgN]
ts = img0.track("camshift", ts, imgs, bb)
ts.drawPath()
ts[-1].image.show()
Using Optional Parameters:
for CAMShift
>>> ts = []
>>> ts = img.track("camshift", ts, img1, bb, lower=(40, 100, 100), upper=(100, 250, 250))
You can provide some/all/None of the optional parameters listed for CAMShift.
for LK
>>> ts = []
>>> ts = img.track("lk", ts, img1, bb, maxCorners=4000, qualityLevel=0.5, minDistance=3)
You can provide some/all/None of the optional parameters listed for LK.
for SURF
>>> ts = []
>>> ts = img.track("surf", ts, img1, bb, eps_val=0.7, min_samples=8, distance=200)
You can provide some/all/None of the optional parameters listed for SURF.
for MFTrack
>>> ts = []
>>> ts = img.track("mftrack", ts, img1, bb, numM=12, numN=12, winsize=15)
You can provide some/all/None of the optional parameters listed for MFTrack.
Check out Tracking examples provided in the SimpleCV source code.
READ MORE:
CAMShift Tracker:
Uses the meanshift-based CAMShift thresholding technique. Blobs and objects with a
single tone are tracked very efficiently. CAMShift should be preferred if you
are trying to track faces. It is optimized to track faces.
LK (Lucas Kanade) Tracker:
It is based on LK Optical Flow. It calculates Optical flow in frame1 to frame2
and also in frame2 to frame1 and using back track error, filters out false
positives.
SURF based Tracker:
Matches keypoints from the template image and the current frame.
flann based matcher is used to match the keypoints.
Density based clustering is used classify points as in-region (of bounding box)
and out-region points. Using in-region points, new bounding box is predicted using
k-means.
Median Flow Tracker:
Median Flow Tracker is the base tracker that is used in OpenTLD. It is based on
Optical Flow. It calculates optical flow of the points in the bounding box from
frame 1 to frame 2 and from frame 2 to frame 1 and using back track error, removes
false positives. As the name suggests, it takes the median of the flow, and eliminates
points.
"""
if not ts and not img:
print "Invalid Input. Must provide a TrackSet or an Image"
return None
if not ts and not bb:
print "Invalid Input. Must provide Bounding Box with Image"
return None
if not ts:
ts = TrackSet()
else:
img = ts[-1].image
bb = ts[-1].bb
try:
import cv2
except ImportError:
print "Tracking is available for OpenCV >= 2.3"
return None
if type(img) == list:
ts = self.track(method, ts, img[0], bb, **kwargs)
for i in img:
ts = i.track(method, ts, **kwargs)
return ts
# Issue #256 - (Bug) Memory management issue due to too many number of images.
nframes = 300
if 'nframes' in kwargs:
nframes = kwargs['nframes']
if len(ts) > nframes:
ts.trimList(50)
if method.lower() == "camshift":
track = camshiftTracker(self, bb, ts, **kwargs)
ts.append(track)
elif method.lower() == "lk":
track = lkTracker(self, bb, ts, img, **kwargs)
ts.append(track)
elif method.lower() == "surf":
try:
from scipy.spatial import distance as Dis
from sklearn.cluster import DBSCAN
except ImportError:
logger.warning("sklearn required")
return None
if not hasattr(cv2, "FeatureDetector_create"):
warnings.warn("OpenCV >= 2.4.3 required. Returning None.")
return None
track = surfTracker(self, bb, ts, **kwargs)
ts.append(track)
elif method.lower() == "mftrack":
track = mfTracker(self, bb, ts, img, **kwargs)
ts.append(track)
return ts
def _to32F(self):
"""
**SUMMARY**
Convert this image to a 32bit floating point image.
"""
retVal = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_32F, 3)
cv.Convert(self.getBitmap(),retVal)
return retVal
def __getstate__(self):
return dict( size = self.size(), colorspace = self._colorSpace, image = self.applyLayers().getBitmap().tostring() )
def __setstate__(self, mydict):
self._bitmap = cv.CreateImageHeader(mydict['size'], cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, mydict['image'])
self._colorSpace = mydict['colorspace']
self.width = mydict['size'][0]
self.height = mydict['size'][1]
def area(self):
'''
Returns the area of the Image.
'''
return self.width * self.height
def _get_header_anim(self):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += int_to_bin(self.size()[0])
bb += int_to_bin(self.size()[1])
bb += "\x87\x00\x00"
return bb
def rotate270(self):
"""
**DESCRIPTION**
Rotate the image 270 degrees to the left, the same as 90 degrees to the right.
This is the same as rotateRight()
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate270().show()
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
cv.Flip(retVal, retVal, 1)
return(Image(retVal, colorSpace=self._colorSpace))
def rotate90(self):
"""
**DESCRIPTION**
Rotate the image 90 degrees to the left, the same as 270 degrees to the right.
This is the same as rotateLeft().
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate90().show()
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
cv.Flip(retVal, retVal, 0) # vertical
return(Image(retVal, colorSpace=self._colorSpace))
def rotateLeft(self): # same as 90
"""
**DESCRIPTION**
Rotate the image 90 degrees to the left.
This is the same as rotate 90.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotateLeft().show()
"""
return self.rotate90()
def rotateRight(self): # same as 270
"""
**DESCRIPTION**
Rotate the image 90 degrees to the right.
This is the same as rotate 270.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotateRight().show()
"""
return self.rotate270()
def rotate180(self):
"""
**DESCRIPTION**
Rotate the image 180 degrees to the left/right.
This is the same as rotating the image by 90 degrees twice.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate180().show()
"""
retVal = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_8U, 3)
cv.Flip(self.getBitmap(), retVal, 0) #vertical
cv.Flip(retVal, retVal, 1)#horizontal
return(Image(retVal, colorSpace=self._colorSpace))
def verticalHistogram(self, bins=10, threshold=128,normalize=False,forPlot=False):
"""
**DESCRIPTION**
This method generates histogram of the number of grayscale pixels
greater than the provided threshold. The method divides the image
into a number evenly spaced vertical bins and then counts the number
of pixels where the pixel is greater than the threshold. This method
is helpful for doing basic morphological analysis.
**PARAMETERS**
* *bins* - The number of bins to use.
* *threshold* - The grayscale threshold. We count pixels greater than this value.
* *normalize* - If normalize is true we normalize the bin counts to sum to one. Otherwise we return the number of pixels.
* *forPlot* - If this is true we return the bin indices, the bin counts, and the bin widths as a tuple. We can use these values in pyplot.bar to quickly plot the histogram.
**RETURNS**
The default settings return the raw bin counts moving from left to
right on the image. If forPlot is true we return a tuple that
contains a list of bin labels, the bin counts, and the bin widths.
This tuple can be used to plot the histogram using
matplotlib.pyplot.bar function.
**EXAMPLE**
>>> import matplotlib.pyplot as plt
>>> img = Image('lenna')
>>> plt.bar(*img.verticalHistogram(threshold=128,bins=10,normalize=False,forPlot=True),color='y')
>>> plt.show()
**NOTES**
See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
See: http://matplotlib.org/api/pyplot_api.html?highlight=hist#matplotlib.pyplot.hist
"""
if( bins <= 0 ):
raise Exception("Not enough bins")
img = self.getGrayNumpy()
pts = np.where(img>threshold)
y = pts[1]
hist = np.histogram(y,bins=bins,range=(0,self.height),normed=normalize)
retVal = None
if( forPlot ):
# for using matplotlib bar command
# bin labels, bin values, bin width
retVal=(hist[1][0:-1],hist[0],self.height/bins)
else:
retVal = hist[0]
return retVal
def horizontalHistogram(self, bins=10, threshold=128,normalize=False,forPlot=False):
"""
**DESCRIPTION**
This method generates histogram of the number of grayscale pixels
greater than the provided threshold. The method divides the image
into a number evenly spaced horizontal bins and then counts the number
of pixels where the pixel is greater than the threshold. This method
is helpful for doing basic morphological analysis.
**PARAMETERS**
* *bins* - The number of bins to use.
* *threshold* - The grayscale threshold. We count pixels greater than this value.
* *normalize* - If normalize is true we normalize the bin counts to sum to one. Otherwise we return the number of pixels.
* *forPlot* - If this is true we return the bin indices, the bin counts, and the bin widths as a tuple. We can use these values in pyplot.bar to quickly plot the histogram.
**RETURNS**
The default settings return the raw bin counts moving from top to
bottom on the image. If forPlot is true we return a tuple that
contains a list of bin labels, the bin counts, and the bin widths.
This tuple can be used to plot the histogram using
matplotlib.pyplot.bar function.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> plt.bar(*img.horizontalHistogram(threshold=128,bins=10,normalize=False,forPlot=True),color='y')
>>>> plt.show()
**NOTES**
See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
See: http://matplotlib.org/api/pyplot_api.html?highlight=hist#matplotlib.pyplot.hist
"""
if( bins <= 0 ):
raise Exception("Not enough bins")
img = self.getGrayNumpy()
pts = np.where(img>threshold)
x = pts[0]
hist = np.histogram(x,bins=bins,range=(0,self.width),normed=normalize)
retVal = None
if( forPlot ):
# for using matplotlib bar command
# bin labels, bin values, bin width
retVal=(hist[1][0:-1],hist[0],self.width/bins)
else:
retVal = hist[0]
return retVal
def getLineScan(self,x=None,y=None,pt1=None,pt2=None,channel = -1):
"""
**SUMMARY**
This function takes in a channel of an image or grayscale by default
and then pulls out a series of pixel values as a linescan object
than can be manipulated further.
**PARAMETERS**
* *x* - Take a vertical line scan at the column x.
* *y* - Take a horizontal line scan at the row y.
* *pt1* - Take a line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.LineScan object or None if the method fails.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> a = img.getLineScan(x=10)
>>>> b = img.getLineScan(y=10)
>>>> c = img.getLineScan(pt1 = (10,10), pt2 = (500,500) )
>>>> plt.plot(a)
>>>> plt.plot(b)
>>>> plt.plot(c)
>>>> plt.show()
"""
if channel == -1:
img = self.getGrayNumpy()
else:
try:
img = self.getNumpy()[:,:,channel]
except IndexError:
print 'Channel missing!'
return None
retVal = None
if( x is not None and y is None and pt1 is None and pt2 is None):
if( x >= 0 and x < self.width):
retVal = LineScan(img[x,:])
retVal.image = self
retVal.pt1 = (x,0)
retVal.pt2 = (x,self.height)
retVal.col = x
x = np.ones((1,self.height))[0]*x
y = range(0,self.height,1)
pts = zip(x,y)
retVal.pointLoc = pts
else:
warnings.warn("ImageClass.getLineScan - that is not a valid scanline.")
return None
elif( x is None and y is not None and pt1 is None and pt2 is None):
if( y >= 0 and y < self.height):
retVal = LineScan(img[:,y])
retVal.image = self
retVal.pt1 = (0,y)
retVal.pt2 = (self.width,y)
retVal.row = y
y = np.ones((1,self.width))[0]*y
x = range(0,self.width,1)
pts = zip(x,y)
retVal.pointLoc = pts
else:
warnings.warn("ImageClass.getLineScan - that is not a valid scanline.")
return None
pass
elif( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 and
x is None and y is None):
pts = self.bresenham_line(pt1,pt2)
retVal = LineScan([img[p[0],p[1]] for p in pts])
retVal.pointLoc = pts
retVal.image = self
retVal.pt1 = pt1
retVal.pt2 = pt2
else:
# an invalid combination - warn
warnings.warn("ImageClass.getLineScan - that is not a valid scanline.")
return None
retVal.channel = channel
return retVal
def setLineScan(self, linescan,x=None,y=None,pt1=None,pt2=None,channel = -1):
"""
**SUMMARY**
This function helps you put back the linescan in the image.
**PARAMETERS**
* *linescan* - LineScan object
* *x* - put line scan at the column x.
* *y* - put line scan at the row y.
* *pt1* - put the line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.Image
**EXAMPLE**
>>> img = Image('lenna')
>>> a = img.getLineScan(x=10)
>>> for index in range(len(a)):
... a[index] = 0
>>> newimg = img.setLineScan(a, x=50)
>>> newimg.show()
# This will show you a black line in column 50.
"""
#retVal = self.toGray()
if channel == -1:
img = np.copy(self.getGrayNumpy())
else:
try:
img = np.copy(self.getNumpy()[:,:,channel])
except IndexError:
print 'Channel missing!'
return None
if( x is None and y is None and pt1 is None and pt2 is None):
if(linescan.pt1 is None or linescan.pt2 is None):
warnings.warn("ImageClass.setLineScan: No coordinates to re-insert linescan.")
return None
else:
pt1 = linescan.pt1
pt2 = linescan.pt2
if( pt1[0] == pt2[0] and np.abs(pt1[1]-pt2[1])==self.height):
x = pt1[0] # vertical line
pt1=None
pt2=None
elif( pt1[1] == pt2[1] and np.abs(pt1[0]-pt2[0])==self.width):
y = pt1[1] # horizontal line
pt1=None
pt2=None
retVal = None
if( x is not None and y is None and pt1 is None and pt2 is None):
if( x >= 0 and x < self.width):
if( len(linescan) != self.height ):
linescan = linescan.resample(self.height)
#check for number of points
#linescan = np.array(linescan)
img[x,:] = np.clip(linescan[:], 0, 255)
else:
warnings.warn("ImageClass.setLineScan: x is out of range.")
return None
elif( x is None and y is not None and pt1 is None and pt2 is None):
if( y >= 0 and y < self.height):
if( len(linescan) != self.width ):
linescan = linescan.resample(self.width)
#check for number of points
#linescan = np.array(linescan)
img[:,y] = np.clip(linescan[:], 0, 255)
else:
warnings.warn("ImageClass.setLineScan: y is out of range.")
return None
elif( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 and
x is None and y is None):
pts = self.bresenham_line(pt1,pt2)
if( len(linescan) != len(pts) ):
linescan = linescan.resample(len(pts))
#linescan = np.array(linescan)
linescan = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0],pt[1]]=linescan[idx]
idx = idx+1
else:
warnings.warn("ImageClass.setLineScan: invalid combination of coordinates.")
return None
if channel == -1:
retVal = Image(img)
else:
temp = np.copy(self.getNumpy())
temp[:,:,channel] = img
retVal = Image(temp)
return retVal
def replaceLineScan(self, linescan, x=None, y=None, pt1=None, pt2=None, channel = None):
"""
**SUMMARY**
This function easily lets you replace the linescan in the image.
Once you get the LineScan object, you might want to edit it. Perform
some task, apply some filter etc and now you want to put it back where
you took it from. By using this function, it is not necessary to specify
where to put the data. It will automatically replace where you took the
LineScan from.
**PARAMETERS**
* *linescan* - LineScan object
* *x* - put line scan at the column x.
* *y* - put line scan at the row y.
* *pt1* - put the line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.Image
**EXAMPLE**
>>> img = Image('lenna')
>>> a = img.getLineScan(x=10)
>>> for index in range(len(a)):
... a[index] = 0
>>> newimg = img.replaceLineScan(a)
>>> newimg.show()
# This will show you a black line in column 10.
"""
if x is None and y is None and pt1 is None and pt2 is None and channel is None:
if linescan.channel == -1:
img = np.copy(self.getGrayNumpy())
else:
try:
img = np.copy(self.getNumpy()[:,:,linescan.channel])
except IndexError:
print 'Channel missing!'
return None
if linescan.row is not None:
if len(linescan) == self.width:
ls = np.clip(linescan, 0, 255)
img[:,linescan.row] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.col is not None:
if len(linescan) == self.height:
ls = np.clip(linescan, 0, 255)
img[linescan.col,:] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.pt1 and linescan.pt2:
pts = self.bresenham_line(linescan.pt1, linescan.pt2)
if( len(linescan) != len(pts) ):
linescan = linescan.resample(len(pts))
ls = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0],pt[1]]=ls[idx]
idx = idx+1
if linescan.channel == -1:
retVal = Image(img)
else:
temp = np.copy(self.getNumpy())
temp[:,:,linescan.channel] = img
retVal = Image(temp)
else:
if channel is None:
retVal = self.setLineScan(linescan , x, y, pt1, pt2, linescan.channel)
else:
retVal = self.setLineScan(linescan , x, y, pt1, pt2, channel)
return retVal
def getPixelsOnLine(self,pt1,pt2):
"""
**SUMMARY**
Return all of the pixels on an arbitrary line.
**PARAMETERS**
* *pt1* - The first pixel coordinate as an (x,y) tuple or list.
* *pt2* - The second pixel coordinate as an (x,y) tuple or list.
**RETURNS**
Returns a list of RGB pixels values.
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.getPixelsOnLine( (0,0), (img.width/2,img.height/2) )
"""
retVal = None
if( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 ):
pts = self.bresenham_line(pt1,pt2)
retVal = [self.getPixel(p[0],p[1]) for p in pts]
else:
warnings.warn("ImageClass.getPixelsOnLine - The line you provided is not valid")
return retVal
def bresenham_line(self, (x,y), (x2,y2)):
"""
Bresenham line algorithm
cribbed from: http://snipplr.com/view.php?codeview&id=22482
This is just a helper method
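**EXAMPLE**
A minimal sketch (assumes 'something.png' exists on disk; endpoints outside the image are clipped to its edges):
>>> img = Image('something.png')
>>> pts = img.bresenham_line((0,0), (img.width/2, img.height/2))
>>> len(pts)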
"""
if (not 0 <= x <= self.width-1 or not 0 <= y <= self.height-1 or
not 0 <= x2 <= self.width-1 or not 0 <= y2 <= self.height-1):
l = Line(self, ((x, y), (x2, y2))).cropToImageEdges()
if l:
ep = list(l.end_points)
ep.sort()
x, y = ep[0]
x2, y2 = ep[1]
else:
return []
steep = 0
coords = []
dx = abs(x2 - x)
if (x2 - x) > 0:
sx = 1
else:
sx = -1
dy = abs(y2 - y)
if (y2 - y) > 0:
sy = 1
else:
sy = -1
if dy > dx:
steep = 1
x,y = y,x
dx,dy = dy,dx
sx,sy = sy,sx
d = (2 * dy) - dx
for i in range(0,dx):
if steep:
coords.append((y,x))
else:
coords.append((x,y))
while d >= 0:
y = y + sy
d = d - (2 * dx)
x = x + sx
d = d + (2 * dy)
coords.append((x2,y2))
return coords
def uncrop(self, ListofPts): #(x,y),(x2,y2)):
"""
**SUMMARY**
This function allows us to translate a set of points from the crop window back to the coordinates of the source image.
**PARAMETERS**
* *ListofPts* - set of points from cropped image.
**RETURNS**
Returns a list of coordinates in the source image.
**EXAMPLE**
>> img = Image('lenna')
>> croppedImg = img.crop(10,20,250,500)
>> sourcePts = croppedImg.uncrop([(2,3),(56,23),(24,87)])
"""
return [(i[0]+self._uncroppedX,i[1]+self._uncroppedY)for i in ListofPts]
def grid(self,dimensions=(10,10), color=(0, 0, 0), width=1, antialias=True, alpha=-1):
"""
**SUMMARY**
Draw a grid on the image
**PARAMETERS**
* *dimensions* - Number of rows and columns as a (rows,cols) tuple or list.
* *color* - Grid's color as a tuple or list.
* *width* - The grid line width in pixels.
* *antialias* - Draw an antialiased object
* *alpha* - The alpha blending for the object. If this value is -1 then the
layer default value is used. A value of 255 means opaque, while 0 means transparent.
**RETURNS**
Returns the index of the drawing layer of the grid
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> img.grid((20,20),(255,0,0),1,True,0)
"""
retVal = self.copy()
try:
step_row = self.size()[1]/dimensions[0]
step_col = self.size()[0]/dimensions[1]
except ZeroDivisionError:
return retVal
i = 1
j = 1
grid = DrawingLayer(self.size()) #add a new layer for grid
while( (i < dimensions[0]) and (j < dimensions[1]) ):
if( i < dimensions[0] ):
grid.line((0,step_row*i), (self.size()[0],step_row*i), color, width, antialias, alpha)
i = i + 1
if( j < dimensions[1] ):
grid.line((step_col*j,0), (step_col*j,self.size()[1]), color, width, antialias, alpha)
j = j + 1
retVal._gridLayer[0] = retVal.addDrawingLayer(grid) # store grid layer index
retVal._gridLayer[1] = dimensions
return retVal
def removeGrid(self):
"""
**SUMMARY**
Remove Grid Layer from the Image.
**PARAMETERS**
None
**RETURNS**
Drawing Layer corresponding to the Grid Layer
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> gridLayer = img.removeGrid()
"""
if self._gridLayer[0] is not None:
grid = self.removeDrawingLayer(self._gridLayer[0])
self._gridLayer=[None,[0, 0]]
return grid
else:
return None
def findGridLines(self):
"""
**SUMMARY**
Return Grid Lines as a Line Feature Set
**PARAMETERS**
None
**RETURNS**
Grid Lines as a Feature Set
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> lines = img.findGridLines()
"""
gridIndex = self.getDrawingLayer(self._gridLayer[0])
if self._gridLayer[0]==-1:
print "Cannot find grid on the image. Try adding a grid first."
lineFS = FeatureSet()
try:
step_row = self.size()[1]/self._gridLayer[1][0]
step_col = self.size()[0]/self._gridLayer[1][1]
except ZeroDivisionError:
return None
i = 1
j = 1
while( i < self._gridLayer[1][0] ):
lineFS.append(Line(self,((0,step_row*i), (self.size()[0],step_row*i))))
i = i + 1
while( j < self._gridLayer[1][1] ):
lineFS.append(Line(self,((step_col*j,0), (step_col*j,self.size()[1]))))
j = j + 1
return lineFS
def logicalAND(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise AND operation on images
**PARAMETERS**
img - the other image to perform the bitwise operation with.
grayscale - if True, use the grayscale images; otherwise operate on the color channels.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalAND(img1, grayscale=False)
>>> img.logicalAND(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_and(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def logicalNAND(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise NAND operation on images
**PARAMETERS**
img - the other image to perform the bitwise operation with.
grayscale - if True, use the grayscale images; otherwise operate on the color channels.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalNAND(img1, grayscale=False)
>>> img.logicalNAND(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_and(self.getNumpyCv2(), img.getNumpyCv2())
retval = cv2.bitwise_not(retval)
return Image(retval, cv2image=True)
def logicalOR(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise OR operation on images
**PARAMETERS**
img - the other image to perform the bitwise operation with.
grayscale - if True, use the grayscale images; otherwise operate on the color channels.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalOR(img1, grayscale=False)
>>> img.logicalOR(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_or(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_or(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def logicalXOR(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise XOR operation on images
**PARAMETERS**
img - the other image to perform the bitwise operation with.
grayscale - if True, use the grayscale images; otherwise operate on the color channels.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalXOR(img1, grayscale=False)
>>> img.logicalXOR(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_xor(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_xor(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def matchSIFTKeyPoints(self, template, quality=200):
"""
**SUMMARY**
matchSIFTKeyPoints allows you to match a template image with another image using
SIFT keypoints. The method extracts keypoints from each image, uses the Fast Library for
Approximate Nearest Neighbors (FLANN) matcher to find correspondences between the feature
points, and filters the correspondences based on quality.
This method should be able to handle reasonable changes in camera orientation and
illumination. Using a template that is close to the target image will yield much
better results.
**PARAMETERS**
* *template* - A template image.
* *quality* - The feature quality metric. This can be any value between about 100 and 500. Lower
values should return fewer, but higher quality features.
**RETURNS**
A Tuple of lists consisting of matched KeyPoints found on the image and matched
keypoints found on the template. keypoints are sorted according to lowest distance.
**EXAMPLE**
>>> template = Image("template.png")
>>> img = camera.getImage()
>>> fs = img.matchSIFTKeyPoints(template)
**SEE ALSO**
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.4.3 required")
return None
if not hasattr(cv2, "FeatureDetector_create"):
warnings.warn("OpenCV >= 2.4.3 required")
return None
if template == None:
return None
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
img = self.getNumpyCv2()
template_img = template.getNumpyCv2()
skp = detector.detect(img)
skp, sd = descriptor.compute(img, skp)
tkp = detector.detect(template_img)
tkp, td = descriptor.compute(template_img, tkp)
idx, dist = self._getFLANNMatches(sd, td)
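# Scale the raw FLANN distances down so they are comparable to the 'quality'
# threshold, then sort the matches from best (smallest distance) to worst.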
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
sfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
sfs.append(KeyPoint(template, skp[i], sd, "SIFT"))
else:
break #since sorted
idx, dist = self._getFLANNMatches(td, sd)
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
tfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
tfs.append(KeyPoint(template, tkp[i], td, "SIFT"))
else:
break
return sfs, tfs
def drawSIFTKeyPointMatch(self, template, distance=200, num=-1, width=1):
"""
**SUMMARY**
Draw SIFT keypoints draws a side by side representation of two images, calculates
keypoints for both images, determines the keypoint correspondences, and then draws
the correspondences. This method is helpful for debugging keypoint calculations
and also looks really cool :) . The parameters mirror the parameters used
for findKeypointMatches to assist with debugging
**PARAMETERS**
* *template* - A template image.
* *distance* - This can be any value between about 100 and 500. A lower value should
return fewer, but higher quality, features.
* *num* - Number of features you want to draw. Features are sorted according to the
dist from min to max.
* *width* - The width of the drawn line.
**RETURNS**
A side by side image of the template and source image with each feature correspondence
draw in a different color.
**EXAMPLE**
>>> img = cam.getImage()
>>> template = Image("myTemplate.png")
>>> result = img.drawSIFTKeyPointMatch(template, 300.00)
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
if template == None:
return
resultImg = template.sideBySide(self,scale=False)
hdif = (self.height-template.height)/2
sfs, tfs = self.matchSIFTKeyPoints(template, distance)
maxlen = min(len(sfs), len(tfs))
if num < 0 or num > maxlen:
num = maxlen
for i in range(num):
skp = sfs[i]
tkp = tfs[i]
pt_a = (int(tkp.y), int(tkp.x)+hdif)
pt_b = (int(skp.y)+template.width, int(skp.x))
resultImg.drawLine(pt_a, pt_b, color=Color.getRandom(),thickness=width)
return resultImg
def stegaEncode(self,message):
"""
**SUMMARY**
A simple steganography tool for hiding messages in images.
**PARAMETERS**
* *message* -A message string that you would like to encode.
**RETURNS**
Your message encoded in the returning image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img2 = img.stegaEncode("HELLO WORLD!")
>>>> img2.save("TopSecretImg.png")
>>>> img3 = Image("TopSecretImg.png")
>>>> img3.stegaDecode()
**NOTES**
More here:
http://en.wikipedia.org/wiki/Steganography
You will need to install stepic:
http://domnit.org/stepic/doc/pydoc/stepic.html
You may need to monkey with jpeg compression
as it seems to degrade the encoded message.
PNG seems to work quite well.
"""
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = pil.frombuffer("RGB",self.size(),self.toString())
stepic.encode_inplace(pilImg,message)
retVal = Image(pilImg)
return retVal.flipVertical()
def stegaDecode(self):
"""
**SUMMARY**
A simple steganography tool for hiding and finding
messages in images.
**RETURNS**
Your message decoded in the image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img2 = img.stegaEncode("HELLO WORLD!")
>>>> img2.save("TopSecretImg.png")
>>>> img3 = Image("TopSecretImg.png")
>>>> img3.stegaDecode()
**NOTES**
More here:
http://en.wikipedia.org/wiki/Steganography
You will need to install stepic:
http://domnit.org/stepic/doc/pydoc/stepic.html
You may need to monkey with jpeg compression
as it seems to degrade the encoded message.
PNG seems to work quite well.
"""
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = pil.frombuffer("RGB",self.size(),self.toString())
result = stepic.decode(pilImg)
return result
def findFeatures(self, method="szeliski", threshold=1000):
"""
**SUMMARY**
Find Szeliski or Harris features in the image.
Harris features correspond to Harris corner detection in the image.
Read more:
Harris Features: http://en.wikipedia.org/wiki/Corner_detection
szeliski Features: http://research.microsoft.com/en-us/um/people/szeliski/publications.htm
**PARAMETERS**
* *method* - Features type
* *threshold* - threshold val
**RETURNS**
A list of Feature objects corresponding to the feature points.
**EXAMPLE**
>>> img = Image("corner_sample.png")
>>> fpoints = img.findFeatures("harris", 2000)
>>> for f in fpoints:
... f.draw()
>>> img.show()
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
img = self.getGrayNumpyCv2()
blur = cv2.GaussianBlur(img, (3, 3), 0)
Ix = cv2.Sobel(blur, cv2.CV_32F, 1, 0)
Iy = cv2.Sobel(blur, cv2.CV_32F, 0, 1)
Ix_Ix = np.multiply(Ix, Ix)
Iy_Iy = np.multiply(Iy, Iy)
Ix_Iy = np.multiply(Ix, Iy)
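# Smooth the gradient products with a Gaussian to form the windowed structure
# tensor (second-moment matrix); detA and traceA below are its determinant and trace.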
Ix_Ix_blur = cv2.GaussianBlur(Ix_Ix, (5, 5), 0)
Iy_Iy_blur = cv2.GaussianBlur(Iy_Iy, (5, 5), 0)
Ix_Iy_blur = cv2.GaussianBlur(Ix_Iy, (5, 5), 0)
harris_thresh = threshold*5000
alpha = 0.06
detA = Ix_Ix_blur * Iy_Iy_blur - Ix_Iy_blur**2
traceA = Ix_Ix_blur + Iy_Iy_blur
feature_list = []
if method == "szeliski":
harmonic_mean = detA / traceA
for j, i in np.argwhere(harmonic_mean > threshold):
feature_list.append(Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
elif method == "harris":
harris_function = detA - (alpha*traceA*traceA)
for j,i in np.argwhere(harris_function > harris_thresh):
feature_list.append(Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
else:
logger.warning("Invalid method.")
return None
return feature_list
def watershed(self, mask=None, erode=2,dilate=2, useMyMask=False):
"""
**SUMMARY**
Implements the Watershed algorithm on the input image.
Read more:
Watershed: "http://en.wikipedia.org/wiki/Watershed_(image_processing)"
**PARAMETERS**
* *mask* - an optional binary mask. If none is provided we do a binarize and invert.
* *erode* - the number of times to erode the mask to find the foreground.
* *dilate* - the number of times to dilate the mask to find possible background.
* *useMyMask* - if this is true we do not modify the mask.
**RETURNS**
The Watershed image
**EXAMPLE**
>>> img = Image("/sampleimages/wshed.jpg")
>>> img1 = img.watershed()
>>> img1.show()
# here is an example of how to create your own mask
>>> img = Image('lenna')
>>> myMask = Image((img.width,img.height))
>>> myMask = myMask.floodFill((0,0),color=Color.WATERSHED_BG)
>>> mask = img.threshold(128)
>>> myMask = (myMask-mask.dilate(2)+mask.erode(2))
>>> result = img.watershed(mask=myMask,useMyMask=True)
**SEE ALSO**
Color.WATERSHED_FG - The watershed foreground color
Color.WATERSHED_BG - The watershed background color
Color.WATERSHED_UNSURE - The watershed not sure if fg or bg color.
TODO: Allow the user to pass in a function that defines the watershed mask.
"""
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
output = self.getEmpty(3)
if mask is None:
mask = self.binarize().invert()
newmask = None
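# Build a marker image: start from an all-background image, subtract the dilated
# mask to leave the uncertain region unmarked, and add the eroded mask to mark
# the sure foreground.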
if( not useMyMask ):
newmask = Image((self.width,self.height))
newmask = newmask.floodFill((0,0),color=Color.WATERSHED_BG)
newmask = (newmask-mask.dilate(dilate)+mask.erode(erode))
else:
newmask = mask
m = np.int32(newmask.getGrayNumpyCv2())
cv2.watershed(self.getNumpyCv2(),m)
m = cv2.convertScaleAbs(m)
ret,thresh = cv2.threshold(m,0,255,cv2.cv.CV_THRESH_OTSU)
retVal = Image(thresh,cv2image=True)
return retVal
def findBlobsFromWatershed(self,mask=None,erode=2,dilate=2,useMyMask=False,invert=False,minsize=20,maxsize=None):
"""
**SUMMARY**
Implements the watershed algorithm on the input image with an optional mask and
then uses the resulting mask to find blobs.
Read more:
Watershed: "http://en.wikipedia.org/wiki/Watershed_(image_processing)"
**PARAMETERS**
* *mask* - an optional binary mask. If none is provided we do a binarize and invert.
* *erode* - the number of times to erode the mask to find the foreground.
* *dilate* - the number of times to dilate the mask to find possible background.
* *useMyMask* - if this is true we do not modify the mask.
* *invert* - invert the resulting mask before finding blobs.
* *minsize* - minimum blob size in pixels.
* *maxsize* - the maximum blob size in pixels.
**RETURNS**
A feature set of blob features.
**EXAMPLE**
>>> img = Image("/sampleimages/wshed.jpg")
>>> mask = img.threshold(100).dilate(3)
>>> blobs = img.findBlobsFromWatershed(mask)
>>> blobs.show()
**SEE ALSO**
Color.WATERSHED_FG - The watershed foreground color
Color.WATERSHED_BG - The watershed background color
Color.WATERSHED_UNSURE - The watershed not sure if fg or bg color.
"""
newmask = self.watershed(mask,erode,dilate,useMyMask)
if( invert ):
newmask = newmask.invert()
return self.findBlobsFromMask(newmask,minsize=minsize,maxsize=maxsize)
def maxValue(self,locations=False):
"""
**SUMMARY**
Returns the brightest/maximum pixel value in the
grayscale image. This method can also return the
locations of pixels with this value.
**PARAMETERS**
* *locations* - If true return the location of pixels
that have this value.
**RETURNS**
The maximum value and optionally the list of points as
a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lenna")
>>> max = img.maxValue()
>>> min, pts = img.minValue(locations=True)
>>> img2 = img.stretch(min,max)
"""
if(locations):
val = np.max(self.getGrayNumpy())
x,y = np.where(self.getGrayNumpy()==val)
locs = zip(x.tolist(),y.tolist())
return int(val),locs
else:
val = np.max(self.getGrayNumpy())
return int(val)
def minValue(self,locations=False):
"""
**SUMMARY**
Returns the darkest/minimum pixel value in the
grayscale image. This method can also return the
locations of pixels with this value.
**PARAMETERS**
* *locations* - If true return the location of pixels
that have this value.
**RETURNS**
The minimum value and optionally the list of points as
a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lenna")
>>> max = img.maxValue()
>>> min, pts = img.minValue(locations=True)
>>> img2 = img.stretch(min,max)
"""
if(locations):
val = np.min(self.getGrayNumpy())
x,y = np.where(self.getGrayNumpy()==val)
locs = zip(x.tolist(),y.tolist())
return int(val),locs
else:
val = np.min(self.getGrayNumpy())
return int(val)
def findKeypointClusters(self, num_of_clusters = 5, order='dsc', flavor='surf'):
'''
This function is meant to try and find interesting areas of an
image. It does this by finding keypoint clusters in an image.
It uses keypoint (ORB) detection to locate points of interest
and then uses kmeans clustering to get the X,Y coordinates of
those clusters of keypoints. You provide the expected number
of clusters and you will get back a list of the X,Y coordinates
and rank order of the number of Keypoints around those clusters
**PARAMETERS**
* num_of_clusters - The number of clusters you are looking for (default: 5)
* order - The rank order you would like the points returned in, dsc or asc, (default: dsc)
* flavor - The keypoint type, or 'corner' for just corners
**EXAMPLE**
>>> img = Image('simplecv')
>>> clusters = img.findKeypointClusters()
>>> clusters.draw()
>>> img.show()
**RETURNS**
FeatureSet
'''
if flavor.lower() == 'corner':
keypoints = self.findCorners() #fallback to corners
else:
keypoints = self.findKeypoints(flavor=flavor.upper()) #find the keypoints
if keypoints is None or len(keypoints) == 0:
return None
xypoints = np.array([(f.x,f.y) for f in keypoints])
xycentroids, xylabels = scv.kmeans2(xypoints, num_of_clusters) # find the clusters of keypoints
xycounts = np.array([])
for i in range(num_of_clusters ): #count the frequency of occurences for sorting
xycounts = np.append(xycounts, len(np.where(xylabels == i)[-1]))
merged = np.msort(np.hstack((np.vstack(xycounts), xycentroids))) #sort based on occurence
clusters = [c[1:] for c in merged] # strip out just the values ascending
if order.lower() == 'dsc':
clusters = clusters[::-1] #reverse if descending
fs = FeatureSet()
for x,y in clusters: #map the values to a feature set
f = Corner(self, x, y)
fs.append(f)
return fs
def getFREAKDescriptor(self, flavor="SURF"):
"""
**SUMMARY**
Compute FREAK Descriptor of given keypoints.
FREAK - Fast Retina Keypoints.
Read more: http://www.ivpe.com/freak.htm
Keypoints can be extracted using following detectors.
- SURF
- SIFT
- BRISK
- ORB
- STAR
- MSER
- FAST
- Dense
**PARAMETERS**
* *flavor* - Detector (see above list of detectors) - string
**RETURNS**
* FeatureSet* - A feature set of KeyPoint Features.
* Descriptor* - FREAK Descriptor
**EXAMPLE**
>>> img = Image("lenna")
>>> fs, des = img.getFREAKDescriptor("ORB")
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV version >= 2.4.2 required")
return None
if cv2.__version__.startswith('$Rev:'):
warnings.warn("OpenCV version >= 2.4.2 required")
return None
if int(cv2.__version__.replace('.','0'))<20402:
warnings.warn("OpenCV version >= 2.4.2 required")
return None
flavors = ["SIFT", "SURF", "BRISK", "ORB", "STAR", "MSER", "FAST", "Dense"]
if flavor not in flavors:
warnings.warn("Unknown keypoint detector. Returning None.")
return None
detector = cv2.FeatureDetector_create(flavor)
extractor = cv2.DescriptorExtractor_create("FREAK")
self._mKeyPoints = detector.detect(self.getGrayNumpyCv2())
self._mKeyPoints, self._mKPDescriptors = extractor.compute(self.getGrayNumpyCv2(),
self._mKeyPoints)
fs = FeatureSet()
for i in range(len(self._mKeyPoints)):
fs.append(KeyPoint(self, self._mKeyPoints[i], self._mKPDescriptors[i], flavor))
return fs, self._mKPDescriptors
def getGrayHistogramCounts(self, bins = 255, limit=-1):
'''
This function returns a list of tuples of greyscale pixel counts
by frequency. This would be useful in determining the dominant
pixels (peaks) of the greyscale image.
**PARAMETERS**
* *bins* - The number of bins for the histogram, defaults to 255 (greyscale)
* *limit* - The number of counts to return, default is all
**RETURNS**
* List * - A list of tuples of (frequency, value)
**EXAMPLE**
>>> img = Image("lenna")
>>> counts = img.getGrayHistogramCounts()
>>> counts[0] #the most dominant pixel color tuple of frequency and value
>>> counts[1][1] #the second most dominant pixel color value
'''
hist = self.histogram(bins)
vals = [(e,h) for h,e in enumerate(hist)]
vals.sort()
vals.reverse()
if limit == -1:
limit = bins
return vals[:limit]
def grayPeaks(self, bins = 255, delta = 0, lookahead = 15):
"""
**SUMMARY**
Takes the histogram of a grayscale image, and returns the peak
grayscale intensity values.
The bins parameter can be used to lump grays together, by default it is
set to 255
Returns a list of tuples, each tuple contains the grayscale intensity,
and the fraction of the image that has it.
**PARAMETERS**
* *bins* - the integer number of bins, between 1 and 255.
* *delta* - the minimum difference between a peak and the following points,
before a peak may be considered a peak. Useful to hinder the
algorithm from picking up false peaks towards the end of
the signal.
* *lookahead* - the distance to look ahead from a peak to determine if it is
an actual peak; should be an integer greater than 0.
**RETURNS**
A list of (grays,fraction) tuples.
**NOTE**
Implemented using the techniques used in huetab()
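**EXAMPLE**
A minimal sketch (the peaks returned depend entirely on the image content):
>>> img = Image("lenna")
>>> peaks = img.grayPeaks(bins=255, delta=0, lookahead=15)
>>> for intensity, fraction in peaks:
...     print intensity, fraction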
"""
# The bins are the number of edges bounding a histogram,
# thus bins = number of bars in the histogram + 1.
# As the range() function is exclusive,
# bins+2 is passed as the parameter.
y_axis, x_axis = np.histogram(self.getGrayNumpy(), bins = range(bins+2))
x_axis = x_axis[0:bins+1]
maxtab = []
mintab = []
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
#perform some checks
if length != len(x_axis):
raise ValueError, "Input vectors y_axis and x_axis must have same length"
if lookahead < 1:
raise ValueError, "Lookahead must be above '1' in value"
if not (np.isscalar(delta) and delta >= 0):
raise ValueError, "delta must be a positive number"
#needs to be a numpy array
y_axis = np.asarray(y_axis)
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
maxtab.append((mxpos, mx))
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
mintab.append((mnpos, mn))
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
retVal = []
for intensity, pixelcount in maxtab:
retVal.append((intensity, pixelcount / float(self.width * self.height)))
return retVal
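# --- Editor's note: standalone sketch, not part of the original SimpleCV source. ---
# grayPeaks() above walks the grayscale histogram with a `lookahead` window and a
# `delta` threshold to reject jitter.  The snippet below builds a synthetic bimodal
# image with NumPy and cross-checks the idea with scipy.signal.find_peaks (assumed
# to be available); the helper and variable names here are illustrative only.
import numpy as np
from scipy.signal import find_peaks

def toy_gray_peaks(gray, bins=255, prominence=0.01):
    """Return (intensity, fraction) tuples for the peaks of a grayscale array."""
    counts, edges = np.histogram(gray, bins=range(bins + 2))
    frac = counts / float(gray.size)            # fraction of pixels per intensity
    idx, _ = find_peaks(frac, prominence=prominence)
    return [(int(edges[i]), float(frac[i])) for i in idx]

# rng = np.random.default_rng(0)
# dark = rng.normal(60, 5, 50000)               # two clusters of gray values
# bright = rng.normal(190, 8, 50000)
# gray = np.clip(np.concatenate([dark, bright]), 0, 255).astype(np.uint8)
# toy_gray_peaks(gray)                          # expect peaks near 60 and 190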
def tvDenoising(self, gray=False, weight=50, eps=0.0002, max_iter=200, resize=1):
"""
**SUMMARY**
Performs Total Variation Denoising, this filter tries to minimize the
total-variation of the image.
see : http://en.wikipedia.org/wiki/Total_variation_denoising
**Parameters**
* *gray* - Boolean value which identifies the colorspace of
the input image. If set to True, filter uses gray scale values,
otherwise colorspace is used.
* *weight* - Denoising weight, it controls the extent of denoising.
* *eps* - Stopping criteria for the algorithm. If the relative difference
of the cost function becomes less than this value, the algorithm stops.
* *max_iter* - Determines the maximum number of iterations the algorithm
goes through for optimizing.
* *resize* - Parameter to scale up/down the image. If set to
1 filter is applied on the original image. This parameter is
mostly to speed up the filter.
**NOTE**
This function requires Scikit-image library to be installed!
To install scikit-image library run::
sudo pip install -U scikit-image
Read More: http://scikit-image.org/
"""
try:
from skimage.filter import denoise_tv_chambolle
except ImportError:
logger.warn('Scikit-image Library not installed!')
return None
img = self.copy()
if resize <= 0:
print 'Enter a valid resize value'
return None
if resize != 1:
img = img.resize(int(img.width*resize),int(img.height*resize))
if gray is True:
img = img.getGrayNumpy()
multichannel = False
elif gray is False:
img = img.getNumpy()
multichannel = True
else:
warnings.warn('gray value not valid')
return None
denoise_mat = denoise_tv_chambolle(img,weight,eps,max_iter,multichannel)
retVal = img * denoise_mat
retVal = Image(retVal)
if resize != 1:
return retVal.resize(int(retVal.width/resize),int(retVal.height/resize))
else:
return retVal
def motionBlur(self,intensity=15, direction='NW'):
"""
**SUMMARY**
Performs a motion blur on an Image. Uses directional filters to produce
the motion blur effect along different directions.
see : https://en.wikipedia.org/wiki/Motion_blur
**Parameters**
* *intensity* - The intensity of the motion blur effect. Basically defines
the size of the filter used in the process. It has to be an integer.
0 intensity implies no blurring.
* *direction* - The direction of the motion. It is a string taking values
left, right, up, down as well as N, S, E, W for north, south, east, west
and NW, NE, SW, SE for northwest and so on.
default is NW
**RETURNS**
An image with the specified motion blur filter applied.
**EXAMPLE**
>>> i = Image ('lenna')
>>> mb = i.motionBlur()
>>> mb.show()
"""
mid = int(intensity/2)
tmp = np.identity(intensity)
if intensity == 0:
warnings.warn("0 intensity means no blurring")
return self
elif intensity % 2 == 0:
div=mid
for i in range(mid, intensity-1):
tmp[i][i] = 0
else:
div=mid+1
for i in range(mid+1, intensity-1):
tmp[i][i]=0
if direction == 'right' or direction.upper() == 'E':
kernel = np.concatenate((np.zeros((1,mid)),np.ones((1,mid+1))),axis=1)
elif direction == 'left' or direction.upper() == 'W':
kernel = np.concatenate((np.ones((1,mid+1)),np.zeros((1,mid))),axis=1)
elif direction == 'up' or direction.upper() == 'N':
kernel = np.concatenate((np.ones((1+mid,1)),np.zeros((mid,1))),axis=0)
elif direction == 'down' or direction.upper() == 'S':
kernel = np.concatenate((np.zeros((mid,1)),np.ones((mid+1,1))),axis=0)
elif direction.upper() == 'NW':
kernel = tmp
elif direction.upper() == 'NE':
kernel = np.fliplr(tmp)
elif direction.upper() == 'SW':
kernel = np.flipud(tmp)
elif direction.upper() == 'SE':
kernel = np.flipud(np.fliplr(tmp))
else:
warnings.warn("Please enter a proper direction")
return None
retval=self.convolve(kernel=kernel/div)
return retval
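# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# motionBlur() above builds a row/column (or diagonal identity) kernel whose ones
# point along the blur direction and divides it by `div`.  The hypothetical helper
# below rebuilds just the horizontal/vertical kernels so the construction is easy
# to inspect; unlike the method above it normalizes by the kernel sum, which keeps
# the overall image brightness unchanged.
import numpy as np

def directional_kernel(intensity=7, direction='E'):
    mid = intensity // 2
    if direction == 'E':       # blur trails to the right
        k = np.concatenate((np.zeros((1, mid)), np.ones((1, mid + 1))), axis=1)
    elif direction == 'W':     # blur trails to the left
        k = np.concatenate((np.ones((1, mid + 1)), np.zeros((1, mid))), axis=1)
    elif direction == 'N':
        k = np.concatenate((np.ones((mid + 1, 1)), np.zeros((mid, 1))), axis=0)
    else:                      # 'S'
        k = np.concatenate((np.zeros((mid, 1)), np.ones((mid + 1, 1))), axis=0)
    return k / k.sum()

# directional_kernel(5, 'E') -> [[0., 0., 0.333, 0.333, 0.333]]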
def recognizeFace(self, recognizer=None):
"""
**SUMMARY**
Find faces in the image using FaceRecognizer and predict their class.
**PARAMETERS**
* *recognizer* - Trained FaceRecognizer object
**EXAMPLES**
>>> cam = Camera()
>>> img = cam.getImage()
>>> recognizer = FaceRecognizer()
>>> recognizer.load("training.xml")
>>> print img.recognizeFace(recognizer)
"""
try:
import cv2
if not hasattr(cv2, "createFisherFaceRecognizer"):
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
except ImportError:
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
if not isinstance(recognizer, FaceRecognizer):
warnings.warn("SimpleCV.Features.FaceRecognizer object required.")
return None
w, h = recognizer.imageSize
label = recognizer.predict(self.resize(w, h))
return label
def findAndRecognizeFaces(self, recognizer, cascade=None):
"""
**SUMMARY**
Predict the class of the face in the image using FaceRecognizer.
**PARAMETERS**
* *recognizer* - Trained FaceRecognizer object
* *cascade* - haarcascade which would identify the face
in the image.
**EXAMPLES**
>>> cam = Camera()
>>> img = cam.getImage()
>>> recognizer = FaceRecognizer()
>>> recognizer.load("training.xml")
>>> feat = img.findAndRecognizeFaces(recognizer, "face.xml")
>>> for feature, label, confidence in feat:
... i = feature.crop()
... i.drawText(str(label))
... i.show()
"""
try:
import cv2
if not hasattr(cv2, "createFisherFaceRecognizer"):
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
except ImportError:
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
if not isinstance(recognizer, FaceRecognizer):
warnings.warn("SimpleCV.Features.FaceRecognizer object required.")
return None
if not cascade:
cascade = "/".join([LAUNCH_PATH,"/Features/HaarCascades/face.xml"])
faces = self.findHaarFeatures(cascade)
if not faces:
warnings.warn("Faces not found in the image.")
return None
retVal = []
for face in faces:
label, confidence = face.crop().recognizeFace(recognizer)
retVal.append([face, label, confidence])
return retVal
def channelMixer(self, channel = 'r', weight = (100,100,100)):
"""
**SUMMARY**
Mixes the channels of an RGB image based on the weights provided. The output replaces the
channel provided in the parameters. Basically alters the value of one channel of an RGB
image based on the values of the other channels and itself. If the image is not RGB it is first
converted to RGB and then the channels are mixed.
**PARAMETERS**
* *channel* - The output channel in which the values are to be replaced.
It can have either 'r' or 'g' or 'b'
* *weight* - The weight of each channel in calculation of the mixed channel.
It is a tuple having 3 values mentioning the percentage of the value of the
channels, from -200% to 200%
**RETURNS**
A SimpleCV RGB Image with the provided channel replaced with the mixed channel.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.channelMixer()
>>> img3 = img.channelMixer(channel = 'g', weight = (3,2,1))
**NOTE**
Read more at http://docs.gimp.org/en/plug-in-colors-channel-mixer.html
"""
r, g, b = self.splitChannels()
if max(weight) > 200 or min(weight) < -200:
warnings.warn('Value of weights can be from -200 to 200%')
return None
weight = map(float,weight)
channel = channel.lower()
if channel == 'r':
r = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
elif channel == 'g':
g = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
elif channel == 'b':
b = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
else:
warnings.warn('Please enter a valid channel(r/g/b)')
return None
retVal = self.mergeChannels(r = r, g = g, b = b)
return retVal
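# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# The mixing in channelMixer() is a per-pixel weighted (percent) sum of the three
# input channels written back into a single output channel.  On a plain (H, W, 3)
# NumPy array the same arithmetic looks like this; `mix_channel` is a hypothetical
# helper, not part of the SimpleCV API.
import numpy as np

def mix_channel(rgb, weight=(100, 100, 100)):
    """Weighted (percent) sum of the R, G, B planes of an (H, W, 3) array."""
    w = np.asarray(weight, dtype=float) / 100.0
    mixed = rgb[..., 0] * w[0] + rgb[..., 1] * w[1] + rgb[..., 2] * w[2]
    return np.clip(mixed, 0, 255).astype(np.uint8)

# Replace the green plane with a (30%, 20%, 10%) mix of the original channels:
# rgb[..., 1] = mix_channel(rgb, weight=(30, 20, 10))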
def prewitt(self):
"""
**SUMMARY**
Prewitt operator for edge detection
**PARAMETERS**
None
**RETURNS**
Image with the Prewitt operator applied to it
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.prewitt()
>>> p.show()
**NOTES**
Read more at: http://en.wikipedia.org/wiki/Prewitt_operator
"""
img = self.copy()
grayimg = img.grayscale()
gx = [[1,1,1],[0,0,0],[-1,-1,-1]]
gy = [[-1,0,1],[-1,0,1],[-1,0,1]]
grayx = grayimg.convolve(gx)
grayy = grayimg.convolve(gy)
grayxnp = np.uint64(grayx.getGrayNumpy())
grayynp = np.uint64(grayy.getGrayNumpy())
retVal = Image(np.sqrt(grayxnp**2+grayynp**2))
return retVal
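# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# prewitt() above convolves with the two 3x3 Prewitt kernels and combines the
# responses as sqrt(gx**2 + gy**2).  scipy.ndimage ships the same kernels, which
# gives a quick cross-check on a raw grayscale array (cast to float to avoid the
# uint8 overflow that the np.uint64 casts above work around).
import numpy as np
from scipy import ndimage

def prewitt_magnitude(gray):
    gray = gray.astype(float)
    gx = ndimage.prewitt(gray, axis=1)   # horizontal gradient
    gy = ndimage.prewitt(gray, axis=0)   # vertical gradient
    return np.hypot(gx, gy)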
def edgeSnap(self,pointList,step = 1):
"""
**SUMMARY**
Given a list of points, finds the edges closest to the line joining two
successive points; edges are returned as a FeatureSet of
Lines.
Note : Image must be binary, it is assumed that prior conversion is done
**Parameters**
* *pointList* - List of points to be checked for nearby edges.
* *step* - Number of points to skip if no edge is found in vicinity.
Keep this small if you want to sharply follow a curve
**RETURNS**
* FeatureSet * - A FeatureSet of Lines
**EXAMPLE**
>>> image = Image("logo").edges()
>>> edgeLines = image.edgeSnap([(50,50),(230,200)])
>>> edgeLines.draw(color = Color.YELLOW,width = 3)
"""
imgArray = self.getGrayNumpy()
c1 = np.count_nonzero(imgArray )
c2 = np.count_nonzero(imgArray - 255)
#checking that all values are 0 and 255
if( c1 + c2 != imgArray.size):
raise ValueError,"Image must be binary"
if(len(pointList) < 2 ):
return None
finalList = [pointList[0]]
featureSet = FeatureSet()
last = pointList[0]
for point in pointList[1:]:
finalList += self._edgeSnap2(last,point,step)
last = point
last = finalList[0]
for point in finalList:
featureSet.append(Line(self,(last,point)))
last = point
return featureSet
def _edgeSnap2(self,start,end,step):
"""
**SUMMARY**
Given two points, returns a list of edge points closest to the line joining them.
A point is a tuple of two numbers.
Note : Image must be binary
**Parameters**
* *start* - First Point
* *end* - Second Point
* *step* - Number of points to skip if no edge is found in vicinity
Keep this low to detect sharp curves
**RETURNS**
* List * - A list of tuples , each tuple contains (x,y) values
"""
edgeMap = np.copy(self.getGrayNumpy())
#Size of the box around a point which is checked for edges.
box = step*4
xmin = min(start[0],end[0])
xmax = max(start[0],end[0])
ymin = min(start[1],end[1])
ymax = max(start[1],end[1])
line = self.bresenham_line(start,end)
#List of Edge Points.
finalList = []
i = 0
#Closest any point has ever come to the end point
overallMinDist = None
while i < len(line) :
x,y = line[i]
#Get the matrix of points from around the current point.
region = edgeMap[x-box:x+box,y-box:y+box]
#Condition at the boundary of the image
if(region.shape[0] == 0 or region.shape[1] == 0):
i += step
continue
#Index of all Edge points
indexList = np.argwhere(region>0)
if (indexList.size > 0):
#Center the coordinates around the point
indexList -= box
minDist = None
# In case multiple edge points exist, choose the one closest
# to the end point
for ix,iy in indexList:
dist = math.hypot(x+ix-end[0],iy+y-end[1])
if(minDist ==None or dist < minDist ):
dx,dy = ix,iy
minDist = dist
# The distance of the new point is compared with the least
# distance computed till now, the point is rejected if it's
# comparatively more. This is done so that edge points don't
# wrap around a curve instead of heading towards the end point
if(overallMinDist!= None and minDist > overallMinDist*1.1):
i+=step
continue
if( overallMinDist == None or minDist < overallMinDist ):
overallMinDist = minDist
# Reset the points in the box so that they are not detected
# during the next iteration.
edgeMap[x-box:x+box,y-box:y+box] = 0
# Keep all the points in the bounding box
if( xmin <= x+dx <= xmax and ymin <= y+dy <= ymax):
#Add the point to list and redefine the line
line =[(x+dx,y+dy)] + self.bresenham_line((x+dx, y+dy), end)
finalList += [(x+dx,y+dy)]
i = 0
i += step
finalList += [end]
return finalList
def motionBlur(self,intensity=15, angle = 0):
"""
**SUMMARY**
Performs the motion blur of an Image given the intensity and angle
see : https://en.wikipedia.org/wiki/Motion_blur
**Parameters**
* *intensity* - The intensity of the motion blur effect. Governs the
size of the kernel used in convolution
* *angle* - Angle in degrees at which motion blur will occur. Positive
is Clockwise and negative is Anti-Clockwise. 0 blurs from left to
right
**RETURNS**
An image with the specified motion blur applied.
**EXAMPLE**
>>> img = Image ('lenna')
>>> blur = img.motionBlur(40,45)
>>> blur.show()
"""
intensity = int(intensity)
if(intensity <= 1):
logger.warning('intensity less than or equal to 1 will result in no change')
return self
kernel = np.zeros((intensity,intensity))
rad = math.radians(angle)
x1,y1 = intensity/2,intensity/2
x2 = int(x1-(intensity-1)/2*math.sin(rad))
y2 = int(y1 -(intensity-1)/2*math.cos(rad))
line = self.bresenham_line((x1,y1),(x2,y2))
x = [p[0] for p in line]
y = [p[1] for p in line]
kernel[x,y] = 1
kernel = kernel/len(line)
return self.convolve(kernel = kernel)
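# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# The kernel in the angle-based motionBlur() above is an all-zero square with a
# single rasterized line of ones from the center at the requested angle,
# normalized by the line length, so convolution averages pixels along that
# direction.  The helper below rebuilds such a kernel with skimage.draw.line
# (assumed available) in place of the internal bresenham_line; the exact axis
# convention may differ slightly from the method above.
import math
import numpy as np
from skimage.draw import line as draw_line

def angle_kernel(intensity=15, angle=0.0):
    rad = math.radians(angle)
    k = np.zeros((intensity, intensity))
    r0 = c0 = intensity // 2
    c1 = int(c0 - (intensity - 1) / 2 * math.sin(rad))
    r1 = int(r0 - (intensity - 1) / 2 * math.cos(rad))
    rr, cc = draw_line(r0, c0, r1, c1)
    k[rr, cc] = 1.0
    return k / k.sum()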
def getLightness(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale using the
Lightness method.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Lightness method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getLightness()
>>> out.show()
**NOTES**
Algorithm used: value = (MAX(R,G,B) + MIN(R,G,B))/2
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array((np.max(imgMat,2) + np.min(imgMat,2))/2,dtype=np.uint8)
else:
logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
def getLuminosity(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale using the
Luminosity method.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Luminosity method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getLuminosity()
>>> out.show()
**NOTES**
Algorithm used: value = 0.21 R + 0.71 G + 0.07 B
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array(np.average(imgMat,2,(0.07,0.71,0.21)),dtype=np.uint8)
else:
logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
def getAverage(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale by averaging out
the R,G,B values.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Average method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getAverage()
>>> out.show()
**NOTES**
Algorithm used: value = (R+G+B)/3
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array(imgMat.mean(2),dtype=np.uint8)
else:
logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
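# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# getLightness, getLuminosity and getAverage above are the classic GIMP
# desaturation modes.  On an RGB-ordered (H, W, 3) float array the three formulas
# are one-liners (the methods above work on the BGR planes returned by
# getNumpyCv2, hence their reversed weight order); weights follow the
# (0.21, 0.71, 0.07) values used above.
import numpy as np

def desaturate(rgb, mode='luminosity'):
    rgb = rgb.astype(float)
    if mode == 'lightness':                 # (max + min) / 2
        out = (rgb.max(axis=2) + rgb.min(axis=2)) / 2.0
    elif mode == 'luminosity':              # perceptual weighting
        out = rgb[..., 0] * 0.21 + rgb[..., 1] * 0.71 + rgb[..., 2] * 0.07
    else:                                   # 'average': (R + G + B) / 3
        out = rgb.mean(axis=2)
    return np.clip(out, 0, 255).astype(np.uint8)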
def smartRotate(self,bins=18,point = [-1,-1],auto = True,threshold=80,minLength=30,maxGap=10,t1=150,t2=200,fixed = True):
"""
**SUMMARY**
Attempts to rotate the image so that the most significant lines are
approximately parallel to horizontal or vertical edges.
**Parameters**
* *bins* - The number of bins the lines will be grouped into.
* *point* - the point about which to rotate, refer :py:meth:`rotate`
* *auto* - If true point will be computed to the mean of centers of all
the lines in the selected bin. If auto is True, value of point is
ignored
* *threshold* - which determines the minimum "strength" of the line
refer :py:meth:`findLines` for details.
* *minLength* - how many pixels long the line must be to be returned,
refer :py:meth:`findLines` for details.
* *maxGap* - how much gap is allowed between line segments to consider
them the same line. Refer to :py:meth:`findLines` for details.
* *t1* - thresholds used in the edge detection step,
refer to :py:meth:`_getEdgeMap` for details.
* *t2* - thresholds used in the edge detection step,
refer to :py:meth:`_getEdgeMap` for details.
* *fixed* - if fixed is true, keep the original image dimensions,
otherwise scale the image to fit the rotation; refer to
:py:meth:`rotate`
**RETURNS**
A rotated image
**EXAMPLE**
>>> i = Image ('image.jpg')
>>> i.smartRotate().show()
"""
lines = self.findLines(threshold, minLength, maxGap, t1,t2)
if(len(lines) == 0):
logger.warning("No lines found in the image")
return self
# Initialize empty bins
binn = [[] for i in range(bins)]
#Convert angle to bin number
conv = lambda x:int(x+90)/bins
#Adding lines to bins
[ binn[conv(line.angle())].append(line) for line in lines ]
#computing histogram, value of each column is total length of all lines
#in the bin
hist = [ sum([line.length() for line in lines]) for lines in binn]
#The maximum histogram
index = np.argmax(np.array(hist))
#Good ol weighted mean, for the selected bin
avg = sum([line.angle()*line.length() for line in binn[index]])/sum([line.length() for line in binn[index] ])
#Mean of centers of all lines in selected bin
if(auto ):
x = sum([line.end_points[0][0] + line.end_points[1][0] for line in binn[index]])/2/len(binn[index])
y = sum([line.end_points[0][1] + line.end_points[1][1] for line in binn[index]])/2/len(binn[index])
point = [x,y]
#Determine whether to rotate the lines to vertical or horizontal
if (-45 <= avg <= 45):
return self.rotate(avg,fixed = fixed,point = point)
elif (avg > 45):
return self.rotate(avg-90,fixed = fixed,point = point)
else:
return self.rotate(avg+90,fixed = fixed,point = point)
#Congratulations !! You did a smart thing
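# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# The rotation angle in smartRotate() comes from (1) binning line angles,
# (2) picking the bin with the greatest total line length, and (3) taking a
# length-weighted mean angle inside that bin.  The hypothetical helper below
# repeats that bookkeeping on plain (angle, length) pairs so the logic can be
# checked without images; angles are assumed to lie in [-90, 90) degrees.
import numpy as np

def dominant_angle(angles, lengths, bins=18):
    angles = np.asarray(angles, dtype=float)
    lengths = np.asarray(lengths, dtype=float)
    edges = np.linspace(-90, 90, bins + 1)
    idx = np.digitize(angles, edges) - 1
    totals = np.bincount(idx, weights=lengths, minlength=bins)
    best = totals.argmax()
    sel = idx == best
    return float(np.average(angles[sel], weights=lengths[sel]))

# dominant_angle([2, 3, 44, -1], [100, 80, 5, 90]) -> about 2.44
# (the [0, 10) degree bin carries the most total line length)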
def normalize(self, newMin = 0, newMax = 255, minCut = 2, maxCut = 98):
"""
**SUMMARY**
Performs image normalization and yields a linearly normalized gray image.
Also known as contrast stretching.
see : http://en.wikipedia.org/wiki/Normalization_(image_processing)
**Parameters**
* *newMin* - The minimum of the new range over which the image is normalized
* *newMax* - The maximum of the new range over which the image is normalized
* *minCut* - A number between 0 and 100. The threshold percentage for
selecting the current minimum value. This helps to avoid the effect of
outlying pixels with very low values.
* *maxCut* - A number between 0 and 100. The threshold percentage for
selecting the current maximum value. This helps to avoid the effect of
outlying pixels with very high values.
**RETURNS**
A normalized grayscale image.
**EXAMPLE**
>>> img = Image ('lenna')
>>> norm = img.normalize()
>>> norm.show()
"""
if newMin < 0 or newMax >255:
warnings.warn("newMin and newMax can vary from 0-255")
return None
if newMax < newMin:
warnings.warn("newMin should be less than newMax")
return None
if minCut > 100 or maxCut > 100:
warnings.warn("minCut and maxCut")
return None
#avoiding the effect of odd pixels
try:
hist = self.getGrayHistogramCounts()
freq, val = zip(*hist)
maxfreq = (freq[0]-freq[-1])* maxCut/100.0
minfreq = (freq[0]-freq[-1])* minCut/100.0
closestMatch = lambda a,l:min(l, key=lambda x:abs(x-a))
maxval = closestMatch(maxfreq, val)
minval = closestMatch(minfreq, val)
retVal = (self.grayscale()-minval)*((newMax-newMin)/float(maxval-minval))+ newMin
#catching zero division in case there are very few intensities present
#Normalizing based on absolute max and min intensities present
except ZeroDivisionError:
maxval = self.maxValue()
minval = self.minValue()
retVal = (self.grayscale()-minval)*((newMax-newMin)/float(maxval-minval))+ newMin
#catching the case where there is only one intensity throughout
except:
warnings.warn("All pixels of the image have only one intensity value")
return None
return retVal
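# --- Editor's note: standalone sketch, not from the original SimpleCV source. ---
# normalize() above is a linear contrast stretch: pick robust low/high
# intensities, then map them onto [newMin, newMax].  With np.percentile the same
# idea is a few lines on a raw grayscale array; note that the method above picks
# its cut levels from the histogram-count table rather than from percentiles, so
# the results can differ slightly.
import numpy as np

def stretch_contrast(gray, new_min=0, new_max=255, low_pct=2, high_pct=98):
    lo, hi = np.percentile(gray, [low_pct, high_pct])
    if hi == lo:                            # flat image: nothing to stretch
        return np.full_like(gray, new_min, dtype=np.uint8)
    out = (gray.astype(float) - lo) * (new_max - new_min) / (hi - lo) + new_min
    return np.clip(out, new_min, new_max).astype(np.uint8)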
def getNormalizedHueHistogram(self,roi=None):
"""
**SUMMARY**
This method generates a normalized hue histogram for the image
or the ROI within the image. The hue histogram is a 2D hue/saturation
numpy array histogram with a shape of 180x256. This histogram can
be used for histogram back projection.
**PARAMETERS**
* *roi* - Anything that can be cajoled into being an ROI feature
including a tuple of (x,y,w,h), a list of points, or another feature.
**RETURNS**
A normalized 180x256 numpy array that is the hue histogram.
**EXAMPLE**
>>> img = Image('lenna')
>>> roi = (0,0,100,100)
>>> hist = img.getNormalizedHueHistogram(roi)
**SEE ALSO**
ImageClass.backProjectHueHistogram()
ImageClass.findBlobsFromHueHistogram()
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.3 required to use this.")
return None
from SimpleCV.Features import ROI
if( roi ): # roi is anything that can be taken to be an roi
roi = ROI(roi,self)
hsv = roi.crop().toHSV().getNumpyCv2()
else:
hsv = self.toHSV().getNumpyCv2()
hist = cv2.calcHist([hsv],[0,1],None,[180,256],[0,180,0,256])
cv2.normalize(hist,hist,0,255,cv2.NORM_MINMAX)
return hist
def backProjectHueHistogram(self,model,smooth=True,fullColor=False,threshold=None):
"""
**SUMMARY**
This method performs hue histogram back projection on the image. This is a very
quick and easy way of matching objects based on color. Given a hue histogram
taken from another image or an roi within the image we attempt to find all
pixels that are similar to the colors inside the histogram. The result can
either be a grayscale image that shows the matches or a color image.
**PARAMETERS**
* *model* - The histogram to use for back projection. This can either be
a histogram, anything that can be converted into an ROI for the image (like
an x,y,w,h tuple or a feature), or another image.
* *smooth* - A bool, True means apply a smoothing operation after doing the
back project to improve the results.
* *fullColor* - return the results as a color image where pixels included
in the back projection are rendered as their source color.
* *threshold* - If this value is not None, we apply a threshold to the
result of back projection to yield a binary image. Valid values are from
1 to 255.
**RETURNS**
A SimpleCV Image rendered according to the parameters provided.
**EXAMPLE**
>>> img = Image('lenna')
>>> hist = img.getNormalizedHueHistogram((0,0,50,50)) # generate a hist
>>> a = img.backProjectHueHistogram(hist)
>>> b = img.backProjectHueHistogram((0,0,50,50)) # same result
>>> c = img.backProjectHueHistogram(Image('lyle'))
**SEE ALSO**
ImageClass.getNormalizedHueHistogram()
ImageClass.findBlobsFromHueHistogram()
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.3 required to use this.")
return None
if( model is None ):
warnings.warn('Backproject requires a model')
return None
# this is the easier test, try to cajole model into ROI
if( isinstance(model,Image) ):
model = model.getNormalizedHueHistogram()
if(not isinstance(model,np.ndarray) or model.shape != (180,256) ):
model = self.getNormalizedHueHistogram(model)
if( isinstance(model,np.ndarray) and model.shape == (180,256) ):
hsv = self.toHSV().getNumpyCv2()
dst = cv2.calcBackProject([hsv],[0,1],model,[0,180,0,256],1)
if smooth:
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
result = Image(dst,cv2image=True)
result = result.toBGR()
if( threshold ):
result = result.threshold(threshold)
if( fullColor ):
temp = Image((self.width,self.height))
result = temp.blit(self,alphaMask=result)
return result
else:
warnings.warn('Backproject model does not appear to be valid')
return None
def findBlobsFromHueHistogram(self,model,threshold=1,smooth=True,minsize=10,maxsize=None):
"""
**SUMMARY**
This method performs hue histogram back projection on the image and uses
the results to generate a FeatureSet of blob objects. This is a very
quick and easy way of matching objects based on color. Given a hue histogram
taken from another image or an roi within the image we attempt to find all
pixels that are similar to the colors inside the histogram.
**PARAMETERS**
* *model* - The histogram to use for back projection. This can either be
a histogram, anything that can be converted into an ROI for the image (like
an x,y,w,h tuple or a feature), or another image.
* *smooth* - A bool, True means apply a smoothing operation after doing the
back project to improve the results.
* *threshold* - If this value is not None, we apply a threshold to the
result of back projection to yield a binary image. Valid values are from
1 to 255.
* *minsize* - the minimum blob size in pixels.
* *maxsize* - the maximum blob size in pixels.
**RETURNS**
A FeatureSet of blob objects or None if no blobs are found.
**EXAMPLE**
>>> img = Image('lenna')
>>> hist = img.getNormalizedHueHistogram((0,0,50,50)) # generate a hist
>>> blobs = img.findBlobsFromHueHistogram(hist)
>>> blobs.show()
**SEE ALSO**
ImageClass.getNormalizedHueHistogram()
ImageClass.backProjectHueHistogram()
"""
newMask = self.backProjectHueHistogram(model,smooth,fullColor=False,threshold=threshold)
return self.findBlobsFromMask(newMask,minsize=minsize,maxsize=maxsize)
def filter(self, flt, grayscale=False):
"""
**SUMMARY**
This function allows you to apply an arbitrary filter to the DFT of an image.
This filter takes in a gray scale image, whiter values are kept and black values
are rejected. In the DFT image, the lower frequency values are in the corners
of the image, while the higher frequency components are in the center. For example,
a low pass filter has white squares in the corners and is black everywhere else.
**PARAMETERS**
* *flt* - A DFT filter
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV image after applying the filter.
**EXAMPLE**
>>> filter = DFT.createGaussianFilter()
>>> myImage = Image("MyImage.png")
>>> result = myImage.filter(filter)
>>> result.show()
"""
filteredimage = flt.applyFilter(self, grayscale)
return filteredimage
from SimpleCV.Features import FeatureSet, Feature, Barcode, Corner, HaarFeature, Line, Chessboard, TemplateMatch, BlobMaker, Circle, KeyPoint, Motion, KeypointMatch, FaceRecognizer
from SimpleCV.Tracking import camshiftTracker, lkTracker, surfTracker, mfTracker, TrackSet
from SimpleCV.Stream import JpegStreamer
from SimpleCV.Font import *
from SimpleCV.DrawingLayer import *
from SimpleCV.DFT import DFT
| bsd-3-clause |
Aasmi/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
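# --- Editor's note: illustrative sketch, not part of the scikit-learn source. ---
# The kernel written above as ``K(D) = -0.5 * (I - 1/n_samples) * D^2 *
# (I - 1/n_samples)`` is ordinary double centering of the squared-distance
# matrix; KernelCenterer does this internally, but the explicit matrix form
# below makes the formula concrete.
import numpy as np

def isomap_kernel(D):
    """Double-centered kernel of an (n, n) distance matrix D."""
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n     # centering matrix I - 1/n
    return -0.5 * J.dot(D ** 2).dot(J)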
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
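# --- Editor's note: illustrative usage sketch, not part of the scikit-learn source. ---
# Minimal end-to-end use of the estimator defined above; the S-curve generator
# from sklearn.datasets is assumed to be available and is a common manifold
# test case.
if __name__ == '__main__':
    from sklearn.datasets import make_s_curve
    X, color = make_s_curve(n_samples=500, random_state=0)
    iso = Isomap(n_neighbors=10, n_components=2)
    X_2d = iso.fit_transform(X)
    print(X_2d.shape, iso.reconstruction_error())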
| bsd-3-clause |
andredalton/bcc | 2013/mac0438/ep3/exemplos/ex6.py | 1 | 2188 | #Demo of using multiprocessing for generating data in one process and plotting
#in another.
#Written by Robert Cimrman
#Requires >= Python 2.6 for the multiprocessing module or having the
#standalone processing module installed
from __future__ import print_function
import time
try:
from multiprocessing import Process, Pipe
except ImportError:
from processing import Process, Pipe
import numpy as np
import matplotlib
matplotlib.use('GtkAgg')
import matplotlib.pyplot as plt
import gobject
class ProcessPlotter(object):
def __init__(self):
self.x = []
self.y = []
def terminate(self):
plt.close('all')
def poll_draw(self):
def call_back():
while 1:
if not self.pipe.poll():
break
command = self.pipe.recv()
if command is None:
self.terminate()
return False
else:
self.x.append(command[0])
self.y.append(command[1])
self.ax.plot(self.x, self.y, 'ro')
self.fig.canvas.draw()
return True
return call_back
def __call__(self, pipe):
print('starting plotter...')
self.pipe = pipe
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.gid = gobject.timeout_add(1000, self.poll_draw())
print('...done')
plt.show()
class NBPlot(object):
def __init__(self):
self.plot_pipe, plotter_pipe = Pipe()
self.plotter = ProcessPlotter()
self.plot_process = Process(target = self.plotter,
args = (plotter_pipe,))
self.plot_process.daemon = True
self.plot_process.start()
def plot(self, finished=False):
send = self.plot_pipe.send
if finished:
send(None)
else:
data = np.random.random(2)
send(data)
def main():
pl = NBPlot()
for ii in range(10):
pl.plot()
time.sleep(0.5)
raw_input('press Enter...')
pl.plot(finished=True)
if __name__ == '__main__':
main() | apache-2.0 |
jstoxrocky/statsmodels | examples/incomplete/arima.py | 34 | 1605 | from __future__ import print_function
from statsmodels.datasets.macrodata import load_pandas
from statsmodels.tsa.base.datetools import dates_from_range
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
plt.interactive(False)
# let's examine an ARIMA model of CPI
cpi = load_pandas().data['cpi']
dates = dates_from_range('1959q1', '2009q3')
cpi.index = dates
res = ARIMA(cpi, (1, 1, 1), freq='Q').fit()
print(res.summary())
# we can look at the series
cpi.diff().plot()
# maybe logs are better
log_cpi = np.log(cpi)
# check the ACF and PCF plots
acf, confint_acf = sm.tsa.acf(log_cpi.diff().values[1:], confint=95)
# center the confidence intervals about zero
#confint_acf -= confint_acf.mean(1)[:, None]
pacf = sm.tsa.pacf(log_cpi.diff().values[1:], method='ols')
# confidence interval is now an option to pacf
from scipy import stats
confint_pacf = stats.norm.ppf(1 - .025) * np.sqrt(1 / 202.)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.set_title('Autocorrelation')
ax.plot(range(41), acf, 'bo', markersize=5)
ax.vlines(range(41), 0, acf)
ax.fill_between(range(41), confint_acf[:, 0], confint_acf[:, 1], alpha=.25)
fig.tight_layout()
ax = fig.add_subplot(122, sharey=ax)
ax.vlines(range(41), 0, pacf)
ax.plot(range(41), pacf, 'bo', markersize=5)
ax.fill_between(range(41), -confint_pacf, confint_pacf, alpha=.25)
#NOTE: you'll be able to just to this when tsa-plots is in master
#sm.graphics.acf_plot(x, nlags=40)
#sm.graphics.pacf_plot(x, nlags=40)
# still some seasonality
# try an arma(1, 1) with ma(4) term
| bsd-3-clause |
andrewor14/iolap | plotting/plot_concurrent.py | 1 | 3995 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from bisect import bisect_left
from os import listdir, makedirs
from os.path import exists, join
import sys
def read_files(paths):
'''
Parse loss data over time from the specified files.
Time here is expressed in terms of number of seconds elapsed since the beginning of
the experiment. Loss is expressed in terms of confidence interval size at a given time.
'''
times, losses = [], []
for path in paths:
time_arr, loss_arr = [], []
with open(path) as f:
for line in f.readlines():
(x, value, low, up) = tuple(line.split(" "))
time_arr += [int(x)]
loss_arr += [float(up) - float(low)]
times += [time_arr]
losses += [loss_arr]
min_time = min(min(time_arr) for time_arr in times)
times = [[(time - min_time) / 1000.0 for time in time_arr] for time_arr in times]
return (times, losses)
def average_loss(times, losses):
'''
Compute average losses over time.
'''
# Flatten, dedup, and sort
all_times = sorted(list(set([t for time_arr in times for t in time_arr])))
all_avg_losses = []
for time in all_times:
total_loss = 0
loss_count = 0
avg_loss = 0
for (i, loss_arr) in enumerate(losses):
nearest_index = bisect_left(times[i], time)
if nearest_index != len(times[i]) and (nearest_index > 0 or time == times[i][nearest_index]):
loss_count += 1
total_loss += loss_arr[nearest_index]
if loss_count != 0:
avg_loss = total_loss / loss_count
all_avg_losses += [avg_loss]
return (all_times, all_avg_losses)
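# --- Editor's note: illustrative sketch, not part of the original script. ---
# average_loss() aligns every query's loss curve on the union of all sample
# times: for each shared time it averages, over every query that has already
# started, the loss at that query's first sample at or after the time.  A small
# hand-checkable case (the helper name is illustrative):
def _demo_average_loss():
    toy_times = [[0, 10], [5, 15]]
    toy_losses = [[4.0, 2.0], [3.0, 1.0]]
    return average_loss(toy_times, toy_losses)
    # -> ([0, 5, 10, 15], [4.0, 2.5, 1.5, 1.0])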
def main():
slaq_name = "slaq_10pools_500bootstrap_students"
args = sys.argv
if len(args) > 2:
print "ERROR: Expected zero or one argument"
sys.exit(1)
if len(args) == 2:
slaq_name = args[1]
plot(slaq_name)
def plot(slaq_name):
fair_name = slaq_name.replace("slaq", "fair")
slaq_dir = "../data/%s" % slaq_name
fair_dir = "../data/%s" % fair_name
slaq_files = [join(slaq_dir, f) for f in listdir(slaq_dir) if f.endswith(".dat")]
fair_files = [join(fair_dir, f) for f in listdir(fair_dir) if f.endswith(".dat")]
(slaq_times, slaq_losses) = read_files(slaq_files)
(fair_times, fair_losses) = read_files(fair_files)
# Make output dir if it doesn't already exist
if not exists(slaq_name):
makedirs(slaq_name)
# Plot raw values
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i in range(len(slaq_times)):
ax.plot(slaq_times[i], slaq_losses[i], "-x", label="Query %s" % i)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Loss")
ax.legend(loc = "upper right")
ax.set_title(slaq_name)
plt.savefig("%s/concurrent_loss.png" % slaq_name)
# Plot deltas
delta_times = [time_arr[:-1] for time_arr in slaq_times]
delta_losses = [[(loss_arr[i+1] - loss_arr[i]) for i in range(len(loss_arr)-1)]
for loss_arr in slaq_losses]
delta_losses = [[loss_arr[i] / min(loss_arr[:i+1]) for i in range(len(loss_arr))]
for loss_arr in delta_losses]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i in range(len(slaq_times)):
ax.plot(delta_times[i][:-1], delta_losses[i][:-1], "-", label="Query %s" % i)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Normalized delta loss")
ax.legend(loc = "upper right")
ax.set_title(slaq_name)
plt.savefig("%s/concurrent_delta_loss.png" % slaq_name)
# Plot average overall loss
(slaq_times, avg_slaq_losses) = average_loss(slaq_times, slaq_losses)
(fair_times, avg_fair_losses) = average_loss(fair_times, fair_losses)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(slaq_times, avg_slaq_losses, "-", label="SLAQ average loss")
ax.plot(fair_times, avg_fair_losses, "-", label="Fair average loss")
ax.set_xlabel("Time (s)")
ax.set_ylabel("Loss")
xlim = min(800, max(slaq_times) * 1.1)
ax.set_xlim([0, xlim])
ax.legend(loc = "upper right")
ax.set_title(slaq_name)
plt.savefig("%s/concurrent_avg_loss.png" % slaq_name)
if __name__ == '__main__':
main()
| apache-2.0 |
gbrammer/grizli | grizli/pipeline/auto_script.py | 1 | 178757 | """
Automatic processing scripts for grizli
"""
import os
import inspect
import traceback
import glob
import time
import warnings
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from .. import prep, utils
from .default_params import UV_N_FILTERS, UV_M_FILTERS, UV_W_FILTERS
from .default_params import OPT_N_FILTERS, OPT_M_FILTERS, OPT_W_FILTERS
from .default_params import IR_N_FILTERS, IR_M_FILTERS, IR_W_FILTERS
from .default_params import ALL_IMAGING_FILTERS, VALID_FILTERS
from .default_params import UV_GRISMS, OPT_GRISMS, IR_GRISMS, GRIS_REF_FILTERS
from .default_params import get_yml_parameters, write_params_to_yml
# needed for function definitions
args = get_yml_parameters()
if False:
np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore')
# Only fetch F814W optical data for now
#ONLY_F814W = True
ONLY_F814W = False
def get_extra_data(root='j114936+222414', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic', PERSIST_PATH=None, instruments=['WFC3'], filters=['F160W', 'F140W', 'F098M', 'F105W'], radius=2, run_fetch=True, from_mast=True, reprocess_parallel=True, s3_sync=False):
import os
import glob
import numpy as np
from hsaquery import query, fetch, fetch_mast
from hsaquery.fetch import DEFAULT_PRODUCTS
if PERSIST_PATH is None:
PERSIST_PATH = os.path.join(HOME_PATH, root, 'Persistence')
tab = utils.GTable.gread(os.path.join(HOME_PATH,
f'{root}_footprint.fits'))
# Fix CLEAR filter names
for i, filt_i in enumerate(tab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
tab['filter'][i] = filt_i.upper()
ra, dec = tab.meta['RA'], tab.meta['DEC']
fp = np.load(os.path.join(HOME_PATH, '{0}_footprint.npy'.format(root)),
allow_pickle=True)[0]
radius = np.sqrt(fp.area*np.cos(dec/180*np.pi))*60/np.pi
xy = np.array(fp.boundary.convex_hull.boundary.xy)
dims = np.array([(xy[0].max()-xy[0].min())*np.cos(dec/180*np.pi),
xy[1].max()-xy[1].min()])*60
extra = query.run_query(box=[ra, dec, radius],
proposid=[],
instruments=instruments,
extensions=['FLT'],
filters=filters,
extra=query.DEFAULT_EXTRA)
# Fix CLEAR filter names
for i, filt_i in enumerate(extra['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
extra['filter'][i] = filt_i.upper()
for k in tab.meta:
extra.meta[k] = tab.meta[k]
extra.write(os.path.join(HOME_PATH, root, 'extra_data.fits'),
format='fits', overwrite=True)
CWD = os.getcwd()
os.chdir(os.path.join(HOME_PATH, root, 'RAW'))
if run_fetch:
if from_mast:
out = fetch_mast.get_from_MAST(extra,
inst_products=DEFAULT_PRODUCTS,
direct=True,
path=os.path.join(HOME_PATH, root, 'RAW'),
skip_existing=True)
else:
curl = fetch.make_curl_script(extra,
level=None,
script_name='extra.sh',
inst_products={'WFC3/UVIS': ['FLC'],
'WFPC2/PC': ['C0M', 'C1M'],
'WFC3/IR': ['RAW'],
'ACS/WFC': ['FLC']},
skip_existing=True,
output_path=os.path.join(HOME_PATH, root, 'RAW'),
s3_sync=s3_sync)
os.system('sh extra.sh')
files = glob.glob('*raw.fits.gz')
files.extend(glob.glob('*fl?.fits.gz'))
for file in files:
print('gunzip '+file)
os.system('gunzip {0}'.format(file))
else:
return extra
remove_bad_expflag(field_root=root, HOME_PATH=HOME_PATH, min_bad=2)
# Reprocess the RAWs into FLTs
status = os.system("python -c 'from grizli.pipeline import reprocess; reprocess.reprocess_wfc3ir(parallel={0})'".format(reprocess_parallel))
if status != 0:
from grizli.pipeline import reprocess
reprocess.reprocess_wfc3ir(parallel=False)
# Persistence products
os.chdir(PERSIST_PATH)
persist_files = fetch.persistence_products(extra)
for file in persist_files:
if not os.path.exists(os.path.basename(file)):
print(file)
os.system('curl -O {0}'.format(file))
for file in persist_files:
root = os.path.basename(file).split('.tar.gz')[0]
if os.path.exists(root):
print('Skip', root)
continue
# Ugly callout to shell
os.system('tar xzvf {0}.tar.gz'.format(root))
os.system('rm {0}/*extper.fits {0}/*flt_cor.fits'.format(root))
os.system('ln -sf {0}/*persist.fits ./'.format(root))
os.chdir(CWD)
def create_path_dict(root='j142724+334246', home='$PWD', raw=None, prep=None, extract=None, persist=None, thumbs=None, paths={}):
"""
Generate path dict.
Default:
{home}
{home}/{root}
{home}/{root}/RAW
{home}/{root}/Prep
{home}/{root}/Persistence
{home}/{root}/Extractions
{home}/{root}/Thumbnails
If ``home`` is specified as '$PWD', it will be calculated from
`os.getcwd`.
Only generates values for keys not already specified in `paths`.
"""
import copy
if home == '$PWD':
home = os.getcwd()
base = os.path.join(home, root)
if raw is None:
raw = os.path.join(home, root, 'RAW')
if prep is None:
prep = os.path.join(home, root, 'Prep')
if persist is None:
persist = os.path.join(home, root, 'Persistence')
if extract is None:
extract = os.path.join(home, root, 'Extractions')
if thumbs is None:
thumbs = os.path.join(home, root, 'Thumbnails')
xpaths = copy.deepcopy(paths)
for k in xpaths:
if xpaths[k] is None:
_ = xpaths.pop(k)
if 'home' not in xpaths:
xpaths['home'] = home
if 'base' not in xpaths:
xpaths['base'] = base
if 'raw' not in xpaths:
xpaths['raw'] = raw
if 'prep' not in xpaths:
xpaths['prep'] = prep
if 'persist' not in xpaths:
xpaths['persist'] = persist
if 'extract' not in xpaths:
xpaths['extract'] = extract
if 'thumbs' not in xpaths:
xpaths['thumbs'] = thumbs
return xpaths
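# --- Editor's note: illustrative usage sketch, not part of the grizli source. ---
# create_path_dict() resolves every working directory for a field once; the
# values below are what the defaults above produce for home='/data' with no
# overrides (the helper name is illustrative).
def _show_default_paths():
    paths = create_path_dict(root='j010311+131615', home='/data')
    # paths['raw']     -> '/data/j010311+131615/RAW'
    # paths['prep']    -> '/data/j010311+131615/Prep'
    # paths['extract'] -> '/data/j010311+131615/Extractions'
    return paths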
def go(root='j010311+131615',
HOME_PATH='$PWD',
RAW_PATH=None, PREP_PATH=None, PERSIST_PATH=None, EXTRACT_PATH=None,
filters=args['filters'],
fetch_files_args=args['fetch_files_args'],
inspect_ramps=False,
is_dash=False, run_prepare_dash=True,
run_parse_visits=True,
is_parallel_field=False,
parse_visits_args=args['parse_visits_args'],
manual_alignment=False,
manual_alignment_args=args['manual_alignment_args'],
preprocess_args=args['preprocess_args'],
visit_prep_args=args['visit_prep_args'],
persistence_args=args['persistence_args'],
redo_persistence_mask=False,
run_fine_alignment=True,
fine_backup=True,
fine_alignment_args=args['fine_alignment_args'],
make_mosaics=True,
mosaic_args=args['mosaic_args'],
mosaic_drizzle_args=args['mosaic_drizzle_args'],
mask_spikes=False,
mosaic_driz_cr_type=0,
make_phot=True,
multiband_catalog_args=args['multiband_catalog_args'],
only_preprocess=False,
overwrite_fit_params=False,
grism_prep_args=args['grism_prep_args'],
refine_with_fits=True,
run_extractions=False,
include_photometry_in_fit=False,
extract_args=args['extract_args'],
make_thumbnails=True,
thumbnail_args=args['thumbnail_args'],
make_final_report=True,
get_dict=False,
kill='',
**kwargs
):
"""
Run the full pipeline for a given target
Parameters
----------
root : str
Rootname of the `mastquery` file.
extract_maglim : [min, max]
Magnitude limits of objects to extract and fit.
"""
# Function defaults
if get_dict:
if get_dict <= 2:
# Default function arguments (different value to avoid recursion)
default_args = go(get_dict=10)
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['root', 'HOME_PATH', 'frame', 'get_dict']:
if k in args:
args.pop(k)
if get_dict == 2:
# Print keywords summary
if len(kwargs) > 0:
print('\n*** Extra args ***\n')
for k in kwargs:
if k not in default_args:
print('\'{0}\':{1},'.format(k, kwargs[k]))
print('\n*** User args ***\n')
for k in args:
if k in default_args:
if args[k] != default_args[k]:
print('\'{0}\':{1},'.format(k, args[k]))
print('\n*** Default args ***\n')
for k in args:
if k in default_args:
print('\'{0}\':{1},'.format(k, args[k]))
return args
else:
return args
# import os
# import glob
# import traceback
#
#
try:
from .. import multifit
from . import auto_script
except:
from grizli import multifit
from grizli.pipeline import auto_script
# #import grizli.utils
import matplotlib.pyplot as plt
# Silence numpy and astropy warnings
utils.set_warnings()
PATHS = create_path_dict(root=root, home=HOME_PATH,
raw=RAW_PATH, prep=PREP_PATH,
persist=PERSIST_PATH, extract=EXTRACT_PATH)
fpfile = os.path.join(PATHS['home'], '{0}_footprint.fits'.format(root))
exptab = utils.GTable.gread(fpfile)
# Fix CLEAR filter names
for i, filt_i in enumerate(exptab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
exptab['filter'][i] = filt_i.upper()
utils.LOGFILE = os.path.join(PATHS['home'], f'{root}.auto_script.log.txt')
utils.log_comment(utils.LOGFILE, '### Pipeline start', show_date=True)
######################
# Download data
os.chdir(PATHS['home'])
if fetch_files_args is not None:
fetch_files_args['reprocess_clean_darks'] &= (not is_dash)
auto_script.fetch_files(field_root=root, HOME_PATH=HOME_PATH,
paths=PATHS,
filters=filters, **fetch_files_args)
else:
os.chdir(PATHS['prep'])
if is_dash & run_prepare_dash:
from wfc3dash import process_raw
os.chdir(PATHS['raw'])
process_raw.run_all()
files = glob.glob(os.path.join(PATHS['raw'], '*_fl*fits'))
files += glob.glob(os.path.join(PATHS['raw'], '*_c[01]m.fits'))
if len(files) == 0:
print('No FL[TC] files found!')
utils.LOGFILE = '/tmp/grizli.log'
return False
if kill == 'fetch_files':
print('kill=\'fetch_files\'')
return True
if inspect_ramps:
# Inspect for CR trails
os.chdir(PATHS['raw'])
status = os.system("python -c 'from grizli.pipeline.reprocess import inspect; inspect()'")
######################
# Parse visit associations
os.chdir(PATHS['prep'])
if (not os.path.exists(f'{root}_visits.npy')) | run_parse_visits:
# Parsing for parallel fields, where time-adjacent exposures
# may have different visit IDs and should be combined
if 'combine_same_pa' in parse_visits_args:
if (parse_visits_args['combine_same_pa'] == -1):
if is_parallel_field:
parse_visits_args['combine_same_pa'] = True
parse_visits_args['max_dt'] = 4./24
else:
parse_visits_args['combine_same_pa'] = False
parse_visits_args['max_dt'] = 1.
else:
parse_visits_args['combine_same_pa'] = is_parallel_field
parsed = auto_script.parse_visits(field_root=root,
RAW_PATH=PATHS['raw'],
filters=filters, is_dash=is_dash,
**parse_visits_args)
else:
parsed = np.load(f'{root}_visits.npy', allow_pickle=True)
if kill == 'parse_visits':
print('kill=\'parse_visits\'')
return True
visits, all_groups, info = parsed
run_has_grism = utils.column_string_operation(info['FILTER'],
['G141', 'G102', 'G800L'],
'count', 'or').sum()
# Alignment catalogs
#catalogs = ['PS1','SDSS','GAIA','WISE']
#######################
# Manual alignment
if manual_alignment:
os.chdir(PATHS['prep'])
auto_script.manual_alignment(field_root=root, HOME_PATH=PATHS['home'],
**manual_alignment_args)
if kill == 'manual_alignment':
print('kill=\'manual_alignment\'')
return True
#####################
# Alignment & mosaics
os.chdir(PATHS['prep'])
tweak_max_dist = (5 if is_parallel_field else 1)
if 'tweak_max_dist' not in visit_prep_args:
visit_prep_args['tweak_max_dist'] = tweak_max_dist
if 'use_self_catalog' not in visit_prep_args:
visit_prep_args['use_self_catalog'] = is_parallel_field
auto_script.preprocess(field_root=root, HOME_PATH=PATHS['home'],
PERSIST_PATH=PATHS['persist'],
visit_prep_args=visit_prep_args,
persistence_args=persistence_args,
**preprocess_args)
if kill == 'preprocess':
print('kill=\'preprocess\'')
return True
if redo_persistence_mask:
comment = '# Redo persistence masking: {0}'.format(persistence_args)
print(comment)
utils.log_comment(utils.LOGFILE, comment)
all_flt_files = glob.glob('*_flt.fits')
all_flt_files.sort()
for file in all_flt_files:
print(file)
pfile = os.path.join(PATHS['persist'],
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PATHS['persist'],
**persistence_args)
##########
# Fine alignment
fine_files = glob.glob('{0}*fine.png'.format(root))
if (run_fine_alignment == 2) & (len(fine_files) > 0) & (len(visits) > 1):
msg = '\n\n### Redo visit-level mosaics and catalogs for fine alignment\n\n'
utils.log_comment(utils.LOGFILE, msg, show_date=True, verbose=True)
keep_visits = []
for visit in visits:
visit_files = glob.glob(visit['product']+'*.cat.*')
visit_files += glob.glob(visit['product']+'_dr*')
visit_files += glob.glob(visit['product']+'*seg.fits*')
if len(visit_files) > 0:
keep_visits.append(visit)
for file in visit_files:
os.remove(file)
# Redrizzle visit-level mosaics and remake catalogs
prep.drizzle_overlaps(keep_visits, check_overlaps=False, skysub=False,
static=False, pixfrac=0.5, scale=None,
final_wcs=False, fetch_flats=False,
final_rot=None,
include_saturated=True)
# Make new catalogs
for visit in keep_visits:
if len(visit['files']) == 0:
continue
visit_filter = visit['product'].split('-')[-1]
is_single = len(visit['files']) == 1
isACS = '_flc' in visit['files'][0]
isWFPC2 = '_c0' in visit['files'][0]
if visit_filter in ['g102', 'g141', 'g800l', 'g280']:
print('# Skip grism visit: {0}'.format(visit['product']))
continue
# New catalog
if visit_prep_args['align_thresh'] is None:
thresh = 2.5
else:
thresh = visit_prep_args['align_thresh']
cat = prep.make_SEP_catalog(root=visit['product'],
threshold=thresh)
# New region file
prep.table_to_regions(cat, '{0}.cat.reg'.format(visit['product']))
# New radec
if not ((isACS | isWFPC2) & is_single):
# 140 brightest or mag range
clip = (cat['MAG_AUTO'] > 18) & (cat['MAG_AUTO'] < 23)
clip &= cat['MAGERR_AUTO'] < 0.05
clip &= utils.catalog_mask(cat,
max_err_percentile=visit_prep_args['max_err_percentile'],
pad=visit_prep_args['catalog_mask_pad'],
pad_is_absolute=False, min_flux_radius=1.)
NMAX = 140
so = np.argsort(cat['MAG_AUTO'][clip])
if clip.sum() > NMAX:
so = so[:NMAX]
prep.table_to_radec(cat[clip][so],
'{0}.cat.radec'.format(visit['product']))
for file in fine_files:
print('rm {0}'.format(file))
os.remove(file)
fine_files = []
if (len(fine_files) == 0) & (run_fine_alignment > 0) & (len(visits) > 1):
fine_catalogs = ['GAIA', 'PS1', 'DES', 'SDSS', 'WISE']
try:
out = auto_script.fine_alignment(field_root=root,
HOME_PATH=PATHS['home'],
**fine_alignment_args)
plt.close()
# Update WCS headers with fine alignment
auto_script.update_wcs_headers_with_fine(root, backup=fine_backup)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! Fine alignment failed")
# Update the visits file with the new exposure footprints
print('Update exposure footprints in {0}_visits.npy'.format(root))
get_visit_exposure_footprints(visit_file='{0}_visits.npy'.format(root),
check_paths=['./', PATHS['raw'], '../RAW'])
# Make combined mosaics
no_mosaics_found = len(glob.glob(f'{root}-ir_dr?_sci.fits')) == 0
if no_mosaics_found & make_mosaics:
skip_single = preprocess_args['skip_single_optical_visits']
if 'fix_stars' in visit_prep_args:
fix_stars = visit_prep_args['fix_stars']
else:
fix_stars = False
# For running at the command line
# if False:
# mos_args = {'mosaic_args': kwargs['mosaic_args'],
# 'fix_stars': kwargs['visit_prep_args']['fix_stars'],
# 'mask_spikes': kwargs['mask_spikes'], 'skip_single_optical_visits': kwargs['preprocess_args']['skip_single_optical_visits']}
# auto_script.make_combined_mosaics(root, **mos_args)
make_combined_mosaics(root, mosaic_args=mosaic_args,
fix_stars=fix_stars, mask_spikes=mask_spikes,
skip_single_optical_visits=skip_single,
mosaic_driz_cr_type=mosaic_driz_cr_type,
mosaic_drizzle_args=mosaic_drizzle_args)
# Make PSFs. Always set get_line_maps=False since PSFs now
# provided for each object.
mosaic_files = glob.glob('{0}-f*sci.fits'.format(root))
if (not is_dash) & (len(mosaic_files) > 0):
print('Make field PSFs')
auto_script.field_psf(root=root, PREP_PATH=PATHS['prep'],
RAW_PATH=PATHS['raw'],
EXTRACT_PATH=PATHS['extract'],
get_line_maps=False, skip=False)
# Are there full-field mosaics?
mosaic_files = glob.glob(f'{root}-f*sci.fits')
# Photometric catalog
has_phot_file = os.path.exists(f'{root}_phot.fits')
if (not has_phot_file) & make_phot & (len(mosaic_files) > 0):
try:
tab = auto_script.multiband_catalog(field_root=root,
**multiband_catalog_args)
try:
# Add columns indicating objects that fall in grism exposures
phot = utils.read_catalog(f'{root}_phot.fits')
out = count_grism_exposures(phot, all_groups,
grisms=['g800l', 'g102', 'g141'],
verbose=True)
phot.write(f'{root}_phot.fits', overwrite=True)
except:
pass
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE,
'# Run `multiband_catalog` with `detection_background=True`')
multiband_catalog_args['detection_background'] = True
tab = auto_script.multiband_catalog(field_root=root,
**multiband_catalog_args)
#tab = auto_script.multiband_catalog(field_root=root, threshold=threshold, detection_background=True, photometry_background=True, get_all_filters=False)
# Make exposure json / html report
auto_script.exposure_report(root, log=True)
# Stop if only want to run pre-processing
if (only_preprocess | (len(all_groups) == 0)):
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
#print('XXX ', thumbnail_args)
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
utils.LOGFILE = '/tmp/grizli.log'
return True
######################
# Grism prep
files = glob.glob(os.path.join(PATHS['prep'], '*GrismFLT.fits'))
files += glob.glob(os.path.join(PATHS['extract'], '*GrismFLT.fits'))
if len(files) == 0:
os.chdir(PATHS['prep'])
grp = auto_script.grism_prep(field_root=root, PREP_PATH=PATHS['prep'],
EXTRACT_PATH=PATHS['extract'],
**grism_prep_args)
del(grp)
######################
# Grism extractions
os.chdir(PATHS['extract'])
#####################
# Update the contam model with the "full.fits"
# files in the working directory
if (len(glob.glob('*full.fits')) > 0) & (refine_with_fits):
auto_script.refine_model_with_fits(field_root=root, clean=True,
grp=None, master_files=None,
spectrum='continuum', max_chinu=5)
# Drizzled grp objects
# All files
if len(glob.glob(f'{root}*_grism*fits*')) == 0:
grism_files = glob.glob('*GrismFLT.fits')
grism_files.sort()
catalog = glob.glob(f'{root}-*.cat.fits')[0]
try:
seg_file = glob.glob(f'{root}-*_seg.fits')[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=grism_files, direct_files=[],
ref_file=None, seg_file=seg_file,
catalog=catalog, cpu_count=-1, sci_extn=1,
pad=256)
# Make drizzle model images
grp.drizzle_grism_models(root=root, kernel='point', scale=0.15)
# Free grp object
del(grp)
if is_parallel_field:
pline = auto_script.PARALLEL_PLINE.copy()
else:
pline = auto_script.DITHERED_PLINE.copy()
# Make script for parallel processing
args_file = f'{root}_fit_args.npy'
if (not os.path.exists(args_file)) | (overwrite_fit_params):
msg = '# generate_fit_params: ' + args_file
utils.log_comment(utils.LOGFILE, msg, verbose=True, show_date=True)
pline['pixscale'] = mosaic_args['wcs_params']['pixel_scale']
pline['pixfrac'] = mosaic_args['mosaic_pixfrac']
if pline['pixfrac'] > 0:
pline['kernel'] = 'square'
else:
pline['kernel'] = 'point'
has_g800l = utils.column_string_operation(info['FILTER'], ['G800L'],
'count', 'or').sum()
if has_g800l > 0:
min_sens = 0.
fit_trace_shift = True
else:
min_sens = 0.001
fit_trace_shift = True
try:
auto_script.generate_fit_params(field_root=root, prior=None, MW_EBV=exptab.meta['MW_EBV'], pline=pline, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=min_sens, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.4], save_file=args_file, fit_trace_shift=fit_trace_shift, include_photometry=True, use_phot_obj=include_photometry_in_fit)
except:
# include_photometry failed?
auto_script.generate_fit_params(field_root=root, prior=None, MW_EBV=exptab.meta['MW_EBV'], pline=pline, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=min_sens, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.4], save_file=args_file, fit_trace_shift=fit_trace_shift, include_photometry=False, use_phot_obj=False)
# Copy for now
os.system(f'cp {args_file} fit_args.npy')
# Done?
if (not run_extractions) | (run_has_grism == 0):
# Make RGB thumbnails
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
utils.LOGFILE = '/tmp/grizli.log'
return True
# Run extractions (and fits)
auto_script.extract(field_root=root, **extract_args)
# Make RGB thumbnails
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
if extract_args['run_fit']:
os.chdir(PATHS['extract'])
# Redrizzle grism models
grism_files = glob.glob('*GrismFLT.fits')
grism_files.sort()
seg_file = glob.glob(f'{root}-[fi]*_seg.fits')[0]
#catalog = glob.glob(f'{root}-*.cat.fits')[0]
catalog = seg_file.replace('_seg.fits','.cat.fits')
grp = multifit.GroupFLT(grism_files=grism_files, direct_files=[],
ref_file=None, seg_file=seg_file,
catalog=catalog, cpu_count=-1, sci_extn=1,
pad=256)
# Make drizzle model images
grp.drizzle_grism_models(root=root, kernel='point', scale=0.15)
# Free grp object
del(grp)
######################
# Summary catalog & webpage
auto_script.summary_catalog(field_root=root, dzbin=0.01,
use_localhost=False,
filter_bandpasses=None)
if make_final_report:
make_report(root, make_rgb=True)
def make_directories(root='j142724+334246', HOME_PATH='$PWD', paths={}):
"""
Make RAW, Prep, Persistence, Extractions directories
"""
import os
paths = create_path_dict(root=root, home=HOME_PATH, paths=paths)
for k in paths:
if k in ['thumbs']:
continue
dir = paths[k]
if not os.path.exists(dir):
print(f'mkdir {dir}')
os.mkdir(dir)
os.system(f'chmod ugoa+rwx {dir}')
else:
print(f'directory {dir} exists')
return paths
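# Minimal usage sketch (field name illustrative): build the working
# directory tree and get back the dictionary of paths keyed by
# 'home', 'raw', 'prep', 'persist', 'extract', 'thumbs':
# >>> paths = make_directories(root='j142724+334246', HOME_PATH='$PWD')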
def fetch_files(field_root='j142724+334246', HOME_PATH='$PWD', paths={}, inst_products={'WFPC2/WFC': ['C0M', 'C1M'], 'WFPC2/PC': ['C0M', 'C1M'], 'ACS/WFC': ['FLC'], 'WFC3/IR': ['RAW'], 'WFC3/UVIS': ['FLC']}, remove_bad=True, reprocess_parallel=False, reprocess_clean_darks=True, s3_sync=False, fetch_flt_calibs=['IDCTAB', 'PFLTFILE', 'NPOLFILE'], filters=VALID_FILTERS, min_bad_expflag=2, fetch_only=False):
"""
Fetch the archival HST exposures for a field, gunzip them, reprocess
the WFC3/IR RAW files into FLTs, and retrieve the associated
calibration and persistence products.
"""
import os
import glob
try:
from .. import utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.fetch_files')
except:
from grizli import utils
try:
try:
from mastquery import query, fetch
MAST_QUERY = True
instdet_key = 'instrument_name'
except:
from hsaquery import query, fetch
MAST_QUERY = False
instdet_key = 'instdet'
except ImportError as ERR:
warn = """{0}
Get one of the query scripts from
https://github.com/gbrammer/esa-hsaquery
https://github.com/gbrammer/mastquery
""".format(ERR)
raise(ImportError(warn))
paths = create_path_dict(root=field_root, home=HOME_PATH, paths=paths)
print('paths: ', paths)
if not os.path.exists(paths['raw']):
make_directories(root=field_root, HOME_PATH=HOME_PATH,
paths=paths)
tab = utils.read_catalog(os.path.join(paths['home'],
f'{field_root}_footprint.fits'))
# Fix CLEAR filter names
for i, filt_i in enumerate(tab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
tab['filter'][i] = filt_i.upper()
use_filters = utils.column_string_operation(tab['filter'], filters,
method='startswith', logical='or')
tab = tab[use_filters]
if len(tab) > 0:
if MAST_QUERY:
tab = query.get_products_table(tab, extensions=['RAW', 'C1M'])
tab = tab[(tab['filter'] != 'F218W')]
if ONLY_F814W:
tab = tab[(tab['filter'] == 'F814W') |
(tab[instdet_key] == 'WFC3/IR')]
# Fetch and preprocess IR backgrounds
os.chdir(paths['raw'])
# Ignore files already moved to RAW/Expflag
bad_files = glob.glob('./Expflag/*')
badexp = np.zeros(len(tab), dtype=bool)
for file in bad_files:
root = os.path.basename(file).split('_')[0]
badexp |= tab['observation_id'] == root.lower()
is_wfpc2 = utils.column_string_operation(tab['instrument_name'],
'WFPC2', method='startswith', logical='or')
use_filters = utils.column_string_operation(tab['filter'],
filters, method='startswith', logical='or')
fetch_selection = (~badexp) & (~is_wfpc2) & use_filters
curl = fetch.make_curl_script(tab[fetch_selection], level=None,
script_name='fetch_{0}.sh'.format(field_root),
inst_products=inst_products, skip_existing=True,
output_path='./', s3_sync=s3_sync)
msg = 'Fetch {0} files (s3_sync={1})'.format(fetch_selection.sum(),
s3_sync)
utils.log_comment(utils.LOGFILE, msg, verbose=True)
# Ugly callout to shell
os.system('sh fetch_{0}.sh'.format(field_root))
if (is_wfpc2 & use_filters).sum() > 0:
# Have to get WFPC2 from ESA
wfpc2_files = (~badexp) & (is_wfpc2) & use_filters
curl = fetch.make_curl_script(tab[wfpc2_files], level=None,
script_name='fetch_wfpc2_{0}.sh'.format(field_root),
inst_products=inst_products, skip_existing=True,
output_path='./', s3_sync=False)
os.system('sh fetch_wfpc2_{0}.sh'.format(field_root))
else:
msg = 'Warning: no files to fetch for filters={0}.'.format(filters)
utils.log_comment(utils.LOGFILE, msg, verbose=True)
# Gunzip if necessary
files = glob.glob('*raw.fits.gz')
files.extend(glob.glob('*fl?.fits.gz'))
files.extend(glob.glob('*c[01]?.fits.gz')) # WFPC2
files.sort()
for file in files:
status = os.system('gunzip {0}'.format(file))
print('gunzip '+file+' # status="{0}"'.format(status))
if status == 256:
os.system('mv {0} {1}'.format(file, file.split('.gz')[0]))
if fetch_only:
files = glob.glob('*raw.fits')
files.sort()
return files
# Remove exposures with bad EXPFLAG
if remove_bad:
remove_bad_expflag(field_root=field_root, HOME_PATH=paths['home'],
min_bad=min_bad_expflag)
# Reprocess the RAWs into FLTs
if reprocess_parallel:
rep = "python -c 'from grizli.pipeline import reprocess; "
rep += "reprocess.reprocess_wfc3ir(parallel={0},clean_dark_refs={1})'"
os.system(rep.format(reprocess_parallel, reprocess_clean_darks))
else:
from grizli.pipeline import reprocess
reprocess.reprocess_wfc3ir(parallel=False,
clean_dark_refs=reprocess_clean_darks)
# Fetch PFLAT reference files needed for optimal drizzled weight images
if fetch_flt_calibs:
flt_files = glob.glob('*_fl?.fits')
flt_files.sort()
#calib_paths = []
for file in flt_files:
cpaths = utils.fetch_hst_calibs(file,
calib_types=fetch_flt_calibs)
# calib_paths.extend(paths)
# Copy mask files generated from preprocessing
os.system('cp *mask.reg {0}'.format(paths['prep']))
# Persistence products
os.chdir(paths['persist'])
persist_files = fetch.persistence_products(tab)
for file in persist_files:
if not os.path.exists(os.path.basename(file)):
print(file)
os.system('curl -O {0}'.format(file))
for file in persist_files:
root = os.path.basename(file).split('.tar.gz')[0]
if os.path.exists(root):
print('Skip', root)
continue
# Ugly callout to shell
os.system('tar xzvf {0}.tar.gz'.format(root))
os.system('rm {0}/*extper.fits {0}/*flt_cor.fits'.format(root))
os.system('ln -sf {0}/*persist.fits ./'.format(root))
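# Minimal usage sketch (field name and path are illustrative); assumes
# the '{field_root}_footprint.fits' table already exists in the 'home' path:
# >>> fetch_files(field_root='j142724+334246', HOME_PATH='$PWD',
# ...             reprocess_parallel=False, s3_sync=False)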
def remove_bad_expflag(field_root='', HOME_PATH='./', min_bad=2):
"""
Remove FLT files in RAW directory with bad EXPFLAG values, which
usually corresponds to failed guide stars.
The script moves files associated with an affected visit to a subdirectory
>>> bad_dir = os.path.join(HOME_PATH, field_root, 'RAW', 'Expflag')
Parameters
----------
field_root : str
Field name, i.e., 'j123654+621608'
HOME_PATH : str
Base path where files are found.
min_bad : int
Minimum number of exposures of a visit where
`EXPFLAG == 'INDETERMINATE'`. Occasionally the first exposure of a
visit has this value set even though guiding is OK, so set to 2
to try to flag more problematic visits.
"""
import os
import glob
import numpy as np
try:
from .. import prep, utils
except:
from grizli import prep, utils
os.chdir(os.path.join(HOME_PATH, field_root, 'RAW'))
files = glob.glob('*raw.fits')+glob.glob('*flc.fits')
files.sort()
if len(files) == 0:
return False
expf = utils.header_keys_from_filelist(files, keywords=['EXPFLAG'],
ext=0, colname_case=str.upper)
expf.write('{0}_expflag.txt'.format(field_root),
format='csv', overwrite=True)
visit_name = np.array([file[:6] for file in expf['FILE']])
visits = np.unique(visit_name)
for visit in visits:
bad = (visit_name == visit) & (expf['EXPFLAG'] != 'NORMAL')
if bad.sum() >= min_bad:
logstr = '# Found bad visit: {0}, N={1}\n'
logstr = logstr.format(visit, bad.sum())
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
if not os.path.exists('Expflag'):
os.mkdir('Expflag')
os.system('mv {0}* Expflag/'.format(visit))
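# Minimal usage sketch (field name and path are illustrative): move visits
# with >= 2 'INDETERMINATE' exposures out of RAW/ into RAW/Expflag:
# >>> remove_bad_expflag(field_root='j142724+334246', HOME_PATH='$PWD',
# ...                    min_bad=2)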
def parse_visits(field_root='', RAW_PATH='../RAW', use_visit=True, combine_same_pa=True, combine_minexp=2, is_dash=False, filters=VALID_FILTERS, max_dt=1e9):
"""
Parse the RAW exposures into visits and direct/grism associations.
Visits at the same PA / filter with fewer than `combine_minexp`
exposures are merged with overlapping visits.
"""
import copy
#import grizli.prep
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.parse_visits')
except:
from grizli import prep, utils
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
files = glob.glob(os.path.join(RAW_PATH, '*fl[tc].fits'))
files += glob.glob(os.path.join(RAW_PATH, '*c0m.fits'))
files += glob.glob(os.path.join(RAW_PATH, '*c0f.fits'))
files.sort()
info = utils.get_flt_info(files)
#info = info[(info['FILTER'] != 'G141') & (info['FILTER'] != 'G102')]
# Only F814W on ACS
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')) | (info['FILTER'] == 'F814W')]
elif filters is not None:
sel = utils.column_string_operation(info['FILTER'], filters,
method='count', logical='OR')
info = info[sel]
if is_dash:
# DASH visits split by exposure
ima_files = glob.glob(os.path.join(RAW_PATH, '*ima.fits'))
ima_files.sort()
visits = []
for file in ima_files:
# Build from IMA filename
root = os.path.basename(file).split("_ima")[0][:-1]
im = pyfits.open(file)
filt = utils.get_hst_filter(im[0].header).lower()
wcs = pywcs.WCS(im['SCI'].header)
fp = Polygon(wcs.calc_footprint())
# q_flt.fits is the pipeline product; there will always be
# fewer DASH-split files
files = glob.glob(os.path.join(RAW_PATH,
f'{root}*[a-o]_flt.fits'))
files.sort()
if len(files) == 0:
continue
files = [os.path.basename(file) for file in files]
direct = {'product': '{0}-{1}'.format(root, filt),
'files': files, 'footprint': fp}
visits.append(direct)
all_groups = utils.parse_grism_associations(visits)
np.save('{0}_visits.npy'.format(field_root),
[visits, all_groups, info])
return visits, all_groups, info
visits, filters = utils.parse_flt_files(info=info, uniquename=True, get_footprint=True, use_visit=use_visit, max_dt=max_dt)
# Don't run combine_minexp if there are grism exposures
grisms = ['G141', 'G102', 'G800L', 'G280']
has_grism = utils.column_string_operation(info['FILTER'], grisms,
'count', 'or').sum()
if combine_same_pa:
combined = {}
for visit in visits:
filter_pa = '-'.join(visit['product'].split('-')[-2:])
prog = '-'.join(visit['product'].split('-')[-4:-3])
key = 'i{0}-{1}'.format(prog, filter_pa)
if key not in combined:
combined[key] = {'product': key, 'files': [], 'footprint': visit['footprint']}
combined[key]['files'].extend(visit['files'])
visits = [combined[k] for k in combined]
# Account for timing to combine only exposures taken at an
# epoch defined by `max_dt` days.
msg = 'parse_visits(combine_same_pa={0}),'.format(combine_same_pa)
msg += ' max_dt={1:.1f}: {0} {2:>3} visits'
utils.log_comment(utils.LOGFILE,
msg.format('BEFORE', max_dt, len(visits)),
verbose=True, show_date=True)
split_list = []
for v in visits:
split_list.extend(utils.split_visit(v, max_dt=max_dt,
visit_split_shift=1.5))
visits = split_list
utils.log_comment(utils.LOGFILE,
msg.format(' AFTER', max_dt, len(visits)),
verbose=True, show_date=True)
get_visit_exposure_footprints(visits)
print('** Combine same PA: **')
for i, visit in enumerate(visits):
print('{0} {1} {2}'.format(i, visit['product'], len(visit['files'])))
elif (combine_minexp > 0) & (not has_grism):
combined = []
for visit in visits:
if len(visit['files']) >= combine_minexp*1:
combined.append(copy.deepcopy(visit))
else:
filter_pa = '-'.join(visit['product'].split('-')[-2:])
has_match = False
fp = visit['footprint']
for ic, cvisit in enumerate(combined):
ckey = '-'.join(cvisit['product'].split('-')[-2:])
if ckey == filter_pa:
cfp = cvisit['footprint']
if cfp.intersection(fp).area > 0.2*fp.area:
has_match = True
cvisit['files'].extend(visit['files'])
if 'footprints' in visit.keys():
cvisit['footprints'].extend(visit['footprints'])
cvisit['footprint'] = cfp.union(fp)
# No match, add the singleton visit
if not has_match:
combined.append(copy.deepcopy(visit))
visits = combined
print('** Combine Singles: **')
for i, visit in enumerate(visits):
print('{0} {1} {2}'.format(i, visit['product'], len(visit['files'])))
all_groups = utils.parse_grism_associations(visits)
print('\n == Grism groups ==\n')
valid_groups = []
for g in all_groups:
try:
print(g['direct']['product'], len(g['direct']['files']), g['grism']['product'], len(g['grism']['files']))
valid_groups.append(g)
except:
pass
all_groups = valid_groups
np.save('{0}_visits.npy'.format(field_root), [visits, all_groups, info])
return visits, all_groups, info
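# Minimal usage sketch (field name illustrative), run from the field's
# working directory: group the RAW exposures into visits and grism
# associations and write '{field_root}_visits.npy':
# >>> visits, all_groups, info = parse_visits(field_root='j142724+334246',
# ...                                         RAW_PATH='../RAW',
# ...                                         combine_same_pa=True)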
def get_visit_exposure_footprints(visit_file='j1000p0210_visits.npy', check_paths=['./', '../RAW'], simplify=1.e-6):
"""
Add exposure-level footprints to the visit dictionary
Parameters
----------
visit_file : str, list
File produced by `parse_visits` (`visits`, `all_groups`, `info`).
If a list, just parse a list of visits and don't save the file.
check_paths : list
Look for the individual exposures in `visits[i]['files']` in these
paths.
simplify : float
Shapely `simplify` tolerance applied to the visit footprint polygon.
Returns
-------
visits : list
List of visit dictionaries updated with exposure-level 'footprints'.
"""
if isinstance(visit_file, str):
visits, all_groups, info = np.load(visit_file, allow_pickle=True)
else:
visits = visit_file
fps = {}
for visit in visits:
visit['footprints'] = []
visit_fp = None
for file in visit['files']:
fp_i = None
for path in check_paths:
pfile = os.path.join(path, file)
if os.path.exists(pfile):
fp_i = utils.get_flt_footprint(flt_file=pfile)
if visit_fp is None:
visit_fp = fp_i.buffer(1./3600)
else:
visit_fp = visit_fp.union(fp_i.buffer(1./3600))
break
visit['footprints'].append(fp_i)
if visit_fp is not None:
if simplify > 0:
visit['footprint'] = visit_fp.simplify(simplify)
else:
visit['footprint'] = visit_fp
fps[file] = fp_i
# ToDo: also update visits in all_groups with `fps`
# Resave the file
if isinstance(visit_file, str):
np.save(visit_file, [visits, all_groups, info])
return visits
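# Minimal usage sketch (file name illustrative): add exposure-level
# footprints to a previously saved visits file, looking for the FLT
# files in the usual places:
# >>> visits = get_visit_exposure_footprints(
# ...     visit_file='j142724+334246_visits.npy',
# ...     check_paths=['./', '../RAW'])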
def manual_alignment(field_root='j151850-813028', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', skip=True, radius=5., catalogs=['PS1', 'DES', 'SDSS', 'GAIA', 'WISE'], visit_list=None, radec=None):
"""
Interactively align visits to a reference catalog with DS9
(wrapper around `prep.manual_alignment`).
"""
#import pyds9
import glob
import os
import numpy as np
#import grizli
from ..prep import get_radec_catalog
from .. import utils, prep, ds9
files = glob.glob('*guess')
tab = utils.read_catalog(os.path.join(HOME_PATH,
f'{field_root}_footprint.fits'))
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
use_visits = []
for visit in visits:
if visit_list is not None:
if visit['product'] not in visit_list:
continue
filt = visit['product'].split('-')[-1]
if (not filt.startswith('g')):
hasg = os.path.exists('{0}.align_guess'.format(visit['product']))
if hasg & skip:
continue
use_visits.append(visit)
print(len(use_visits), len(visits))
if len(use_visits) == 0:
return True
if radec is None:
radec, ref_catalog = get_radec_catalog(ra=np.mean(tab['ra']),
dec=np.median(tab['dec']),
product=field_root,
reference_catalogs=catalogs, radius=radius)
else:
ref_catalog = catalogs[0]
reference = '{0}/{1}_{2}.reg'.format(os.getcwd(), field_root,
ref_catalog.lower())
ds9 = ds9.DS9()
ds9.set('mode pan')
ds9.set('scale zscale')
ds9.set('scale log')
for visit in use_visits:
filt = visit['product'].split('-')[-1]
if (not filt.startswith('g')):
prep.manual_alignment(visit, reference=reference, ds9=ds9)
ds9.set('quit')
def clean_prep(field_root='j142724+334246'):
"""
Clean unneeded files after the field preparation
"""
import glob
import os
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
for visit in visits:
for ext in ['_drz_wht', '_seg', '_bkg']:
file = visit['product']+ext+'.fits'
if os.path.exists(file):
print('remove '+file)
os.remove(file)
clean_files = glob.glob('*crclean.fits')
for file in clean_files:
print('remove '+file)
os.remove(file)
# Do this in preprocess to avoid doing it over and over
# Fix NaNs
# flt_files = glob.glob('*_fl?.fits')
# for flt_file in flt_files:
# utils.fix_flt_nan(flt_file, verbose=True)
def preprocess(field_root='j142724+334246', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', PERSIST_PATH=None, min_overlap=0.2, make_combined=True, catalogs=['PS1', 'DES', 'NSC', 'SDSS', 'GAIA', 'WISE'], use_visit=True, master_radec=None, parent_radec=None, use_first_radec=False, skip_imaging=False, clean=True, skip_single_optical_visits=True, visit_prep_args=args['visit_prep_args'], persistence_args=args['persistence_args']):
"""
Run the visit-level preprocessing: astrometric alignment, background
subtraction and persistence masking for the direct and grism visits
of a field.
master_radec : str
Force the use of this astrometric reference (radec) file.
parent_radec : str
Use this radec file if the overlap of a visit with the available
`*cat.radec` catalogs is less than `min_overlap`.
"""
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.preprocess')
except:
from grizli import prep, utils
import os
import glob
import numpy as np
import grizli
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
import copy
if PERSIST_PATH is None:
PERSIST_PATH = os.path.join(HOME_PATH, field_root, 'Persistence')
visits, all_groups, info = np.load(f'{field_root}_visits.npy',
allow_pickle=True)
# Grism visits
master_footprint = None
radec = None
# Master table
# visit_table = os.path.join(os.path.dirname(grizli.__file__), 'data/visit_alignment.txt')
# if os.path.exists(visit_table):
# visit_table = utils.GTable.gread(visit_table)
# else:
# visit_table = None
for i in range(len(all_groups)):
direct = all_groups[i]['direct']
grism = all_groups[i]['grism']
print(i, direct['product'], len(direct['files']), grism['product'], len(grism['files']))
if len(glob.glob(grism['product']+'_dr?_sci.fits')) > 0:
print('Skip grism', direct['product'], grism['product'])
continue
# Do all ACS G800L files exist?
if 'g800l' in grism['product']:
test_flc = True
for file in grism['files']:
test_flc &= os.path.exists(file)
if test_flc:
print('Skip grism (all FLC exist)', direct['product'],
grism['product'])
continue
# Make guess file
# if visit_table is not None:
# ix = ((visit_table['visit'] == direct['product']) &
# (visit_table['field'] == field_root))
#
# if ix.sum() > 0:
# guess = visit_table['xshift', 'yshift', 'rot', 'scale'][ix]
# guess['rot'] = 0.
# guess['scale'] = 1.
# print('\nWCS: '+direct['product']+'\n', guess)
# guess.write('{0}.align_guess'.format(direct['product']),
# format='ascii.commented_header')
if master_radec is not None:
radec = master_radec
best_overlap = 0.
else:
radec_files = glob.glob('*cat.radec')
radec = parent_radec
best_overlap = 0
fp = direct['footprint']
for rdfile in radec_files:
if os.path.exists(rdfile.replace('cat.radec', 'wcs_failed')):
continue
points = np.loadtxt(rdfile)
try:
hull = ConvexHull(points)
except:
continue
rd_fp = Polygon(points[hull.vertices, :])
olap = rd_fp.intersection(fp)
if (olap.area > min_overlap*fp.area) & (olap.area > best_overlap):
radec = rdfile
best_overlap = olap.area
if use_first_radec:
master_radec = radec
print('\n\n\n{0} radec: {1}\n\n\n'.format(direct['product'], radec))
###########################
# Preprocessing script, background subtraction, etc.
status = prep.process_direct_grism_visit(direct=direct, grism=grism,
radec=radec, skip_direct=False, **visit_prep_args)
###################################
# Persistence Masking
for file in direct['files']+grism['files']:
print(file)
pfile = os.path.join(PERSIST_PATH,
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PERSIST_PATH,
**persistence_args)
# Fix NaNs
utils.fix_flt_nan(file, verbose=True)
# From here, `radec` will be the radec file from the first grism visit
#master_radec = radec
if skip_imaging:
return True
# Ancillary visits
imaging_visits = []
for visit in visits:
filt = visit['product'].split('-')[-1]
if (len(glob.glob(visit['product']+'_dr?_sci.fits')) == 0) & (not filt.startswith('g1')):
imaging_visits.append(visit)
# Run preprocessing in order of decreasing filter wavelength
filters = [v['product'].split('-')[-1] for v in visits]
fwave = np.cast[float]([f.replace('f1', 'f10'). \
replace('f098m', 'f0980m'). \
replace('lp', 'w'). \
replace('fq', 'f')[1:-1]
for f in filters])
if len(np.unique(fwave)) > 1:
sort_idx = np.argsort(fwave)[::-1]
else:
sort_idx = np.arange(len(fwave), dtype=int)
for i in sort_idx:
direct = visits[i]
if 'g800l' in direct['product']:
continue
# Skip singleton optical visits
if (fwave[i] < 900) & (len(direct['files']) == 1):
if skip_single_optical_visits:
print('Only one exposure, skip', direct['product'])
continue
if len(glob.glob(direct['product']+'_dr?_sci.fits')) > 0:
print('Skip', direct['product'])
continue
else:
print(direct['product'])
if master_radec is not None:
radec = master_radec
best_overlap = 0
fp = direct['footprint']
else:
radec_files = glob.glob('*cat.radec')
radec = parent_radec
best_overlap = 0
radec_n = 0
fp = direct['footprint']
for rdfile in radec_files:
points = np.loadtxt(rdfile)
hull = ConvexHull(points)
rd_fp = Polygon(points[hull.vertices, :])
olap = rd_fp.intersection(fp)
if (olap.area > min_overlap*fp.area) & (olap.area > best_overlap) & (len(points) > 0.2*radec_n):
radec = rdfile
best_overlap = olap.area
radec_n = len(points)
print('\n\n\n{0} radec: {1} ({2:.2f})\n\n\n'.format(direct['product'], radec, best_overlap/fp.area))
try:
try:
status = prep.process_direct_grism_visit(direct=direct,
grism={}, radec=radec,
skip_direct=False, **visit_prep_args)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! First `prep` run failed with `run_tweak_align`. Try again")
if 'run_tweak_align' in visit_prep_args:
visit_prep_args['run_tweak_align'] = False
status = prep.process_direct_grism_visit(direct=direct,
grism={}, radec=radec,
skip_direct=False, **visit_prep_args)
failed_file = '%s.failed' % (direct['product'])
if os.path.exists(failed_file):
os.remove(failed_file)
###################################
# Persistence Masking
for file in direct['files']:
print(file)
pfile = os.path.join(PERSIST_PATH,
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PERSIST_PATH,
**persistence_args)
# Fix NaNs
utils.fix_flt_nan(file, verbose=True)
except:
fp = open('%s.failed' % (direct['product']), 'w')
fp.write('\n')
fp.close()
###################################
# WFC3/IR Satellite trails
if False:
from mywfc3.satdet import _detsat_one
wfc3 = (info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')
for file in info['FILE'][wfc3]:
print(file)
mask = _detsat_one(file, update=False, ds9=None, plot=False, verbose=True)
###################################
# Clean up
if clean:
clean_prep(field_root=field_root)
###################################
# Drizzle by filter
# failed = [f.split('.failed')[0] for f in glob.glob('*failed')]
# keep_visits = []
# for visit in visits:
# if visit['product'] not in failed:
# keep_visits.append(visit)
#
# overlaps = utils.parse_visit_overlaps(keep_visits, buffer=15.0)
# np.save('{0}_overlaps.npy'.format(field_root), [overlaps])
#
# keep = []
# wfc3ir = {'product':'{0}-ir'.format(field_root), 'files':[]}
# if not make_combined:
# return True
#
# for overlap in overlaps:
# filt = overlap['product'].split('-')[-1]
# overlap['product'] = '{0}-{1}'.format(field_root, filt)
#
# overlap['reference'] = '{0}-ir_drz_sci.fits'.format(field_root)
#
# if False:
# if 'g1' not in filt:
# keep.append(overlap)
# else:
# keep.append(overlap)
#
# if filt.upper() in ['F098M','F105W','F110W', 'F125W','F140W','F160W']:
# wfc3ir['files'].extend(overlap['files'])
#
# prep.drizzle_overlaps([wfc3ir], parse_visits=False, pixfrac=0.6, scale=0.06, skysub=False, bits=None, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False)
#
# prep.drizzle_overlaps(keep, parse_visits=False, pixfrac=0.6, scale=0.06, skysub=False, bits=None, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False)
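# Minimal usage sketch (field name and path are illustrative): run the
# visit-level preprocessing with the default alignment catalogs:
# >>> preprocess(field_root='j142724+334246', HOME_PATH='$PWD',
# ...            master_radec=None, parent_radec=None, clean=True)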
def mask_IR_psf_spikes(visit={},
mag_lim=17, cat=None, cols=['mag_auto', 'ra', 'dec'], minR=8, dy=5, selection=None, length_scale=1, dq_bit=2048):
"""
Mask 45-degree diffraction spikes around bright stars.
Parameters
----------
minR : float
Mask spike pixels > `minR` pixels from the star centers.
dy : int
Mask spike pixels +/- `dy` pixels from the computed center of a spike.
selection : bool array
If None, select objects with `mag < mag_lim` from `cat`. Otherwise
use the supplied array as the selection mask.
length_scale : float
Scale length of the spike mask by this factor. The default spike mask
length in pixels is
>>> # m = star AB magnitude
>>> mask_len = 4*np.sqrt(10**(-0.4*(np.minimum(m,17)-17)))/0.06
"""
from scipy.interpolate import griddata
if cat is None:
cat = utils.read_catalog('{0}.cat.fits'.format(visit['product']))
try:
mag, ra, dec = cat[cols[0]], cat[cols[1]], cat[cols[2]]
except:
mag, ra, dec = cat['MAG_AUTO'], cat['X_WORLD'], cat['Y_WORLD']
if selection is None:
selection = mag < mag_lim
for file in visit['files']:
if not os.path.exists(file):
print('Mask diffraction spikes (skip file {0})'.format(file))
continue
im = pyfits.open(file, mode='update')
print('Mask diffraction spikes ({0}), N={1} objects'.format(file, selection.sum()))
for ext in [1, 2, 3, 4]:
if ('SCI', ext) not in im:
break
wcs = pywcs.WCS(im['SCI', ext].header, fobj=im)
try:
cd = wcs.wcs.cd
except:
cd = wcs.wcs.pc
footp = utils.WCSFootprint(wcs)
points = np.array([ra, dec]).T
selection &= footp.path.contains_points(points)
if selection.sum() == 0:
continue
sh = im['SCI', ext].data.shape
mask = np.zeros(sh, dtype=int)
iy, ix = np.indices(sh)
# Spider angles, by hand!
thetas = np.array([[1.07000000e+02, 1.07000000e+02, -8.48089636e-01, 8.46172810e-01],
[3.07000000e+02, 1.07000000e+02, -8.48252315e-01, 8.40896646e-01],
[5.07000000e+02, 1.07000000e+02, -8.42360089e-01, 8.38631568e-01],
[7.07000000e+02, 1.07000000e+02, -8.43990233e-01, 8.36766818e-01],
[9.07000000e+02, 1.07000000e+02, -8.37264191e-01, 8.31481992e-01],
[1.07000000e+02, 3.07000000e+02, -8.49196752e-01, 8.47137753e-01],
[3.07000000e+02, 3.07000000e+02, -8.46919396e-01, 8.43697746e-01],
[5.07000000e+02, 3.07000000e+02, -8.43849045e-01, 8.39136104e-01],
[7.07000000e+02, 3.07000000e+02, -8.40070025e-01, 8.36362299e-01],
[9.07000000e+02, 3.07000000e+02, -8.35218388e-01, 8.34258999e-01],
[1.07000000e+02, 5.07000000e+02, -8.48708154e-01, 8.48377823e-01],
[3.07000000e+02, 5.07000000e+02, -8.45874787e-01, 8.38512574e-01],
[5.07000000e+02, 5.07000000e+02, -8.37238493e-01, 8.42544142e-01],
[7.07000000e+02, 5.07000000e+02, -8.26696970e-01, 8.37981214e-01],
[9.07000000e+02, 5.07000000e+02, -8.29422567e-01, 8.32182726e-01],
[1.07000000e+02, 7.07000000e+02, -8.42331487e-01, 8.43417815e-01],
[3.07000000e+02, 7.07000000e+02, -8.40006233e-01, 8.48355643e-01],
[5.07000000e+02, 7.07000000e+02, -8.39776844e-01, 8.48106508e-01],
[7.07000000e+02, 7.07000000e+02, -8.38620315e-01, 8.40031240e-01],
[9.07000000e+02, 7.07000000e+02, -8.28351652e-01, 8.31933185e-01],
[1.07000000e+02, 9.07000000e+02, -8.40726238e-01, 8.51621083e-01],
[3.07000000e+02, 9.07000000e+02, -8.36006159e-01, 8.46746171e-01],
[5.07000000e+02, 9.07000000e+02, -8.35987878e-01, 8.48932633e-01],
[7.07000000e+02, 9.07000000e+02, -8.34104095e-01, 8.46009851e-01],
[9.07000000e+02, 9.07000000e+02, -8.32700159e-01, 8.38512715e-01]])
thetas[thetas == 107] = 0
thetas[thetas == 907] = 1014
xy = np.array(wcs.all_world2pix(ra[selection], dec[selection], 0)).T
t0 = griddata(thetas[:, :2], thetas[:, 2], xy, method='linear',
fill_value=np.mean(thetas[:, 2]))
t1 = griddata(thetas[:, :2], thetas[:, 3], xy, method='linear',
fill_value=np.mean(thetas[:, 3]))
for i, m in enumerate(mag[selection]):
# Size that depends on magnitude
xlen = 4*np.sqrt(10**(-0.4*(np.minimum(m, 17)-17)))/0.06
xlen *= length_scale
x = np.arange(-xlen, xlen, 0.05)
xx = np.array([x, x*0.])
for t in [t0[i], t1[i]]:
_mat = np.array([[np.cos(t), -np.sin(t)],
[np.sin(t), np.cos(t)]])
xr = _mat.dot(xx).T
x = xr+xy[i, :]
xp = np.cast[int](np.round(x))
#plt.plot(xp[:,0], xp[:,1], color='pink', alpha=0.3, linewidth=5)
for j in range(-dy, dy+1):
ok = (xp[:, 1]+j >= 0) & (xp[:, 1]+j < sh[0])
ok &= (xp[:, 0] >= 0) & (xp[:, 0] < sh[1])
ok &= np.abs(xp[:, 1]+j - xy[i, 1]) > minR
ok &= np.abs(xp[:, 0] - xy[i, 0]) > minR
mask[xp[ok, 1]+j, xp[ok, 0]] = 1
im['DQ', ext].data |= mask*dq_bit
im.flush()
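# Minimal usage sketch: mask the diffraction spikes of stars brighter than
# `mag_lim` in the exposures of a single visit dictionary, where `visits`
# is assumed to come from `parse_visits`:
# >>> mask_IR_psf_spikes(visit=visits[0], mag_lim=17, dq_bit=2048)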
def multiband_catalog(field_root='j142724+334246', threshold=1.8, detection_background=True, photometry_background=True, get_all_filters=False, filters=None, det_err_scale=-np.inf, rescale_weight=True, run_detection=True, detection_filter='ir', detection_root=None, output_root=None, use_psf_filter=True, detection_params=prep.SEP_DETECT_PARAMS, phot_apertures=prep.SEXTRACTOR_PHOT_APERTURES_ARCSEC, master_catalog=None, bkg_mask=None, bkg_params={'bw': 64, 'bh': 64, 'fw': 3, 'fh': 3, 'pixel_scale': 0.06}, use_bkg_err=False, aper_segmask=True):
"""
Make a detection catalog and run aperture photometry with the
SExtractor clone `~sep`.
`phot_apertures` are aperture *diameters*. If provided as a string, the
apertures are assumed to be in pixel units. A list of elements with
astropy unit attributes can also be provided; these are converted to
pixels using the image WCS / pixel scale.
"""
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.multiband_catalog')
except:
from grizli import prep, utils
# Make catalog
if master_catalog is None:
master_catalog = '{0}-{1}.cat.fits'.format(field_root, detection_filter)
else:
if not os.path.exists(master_catalog):
print('Master catalog {0} not found'.format(master_catalog))
return False
if not os.path.exists(master_catalog):
run_detection = True
if detection_root is None:
detection_root = '{0}-{1}'.format(field_root, detection_filter)
if output_root is None:
output_root = field_root
if run_detection:
if use_psf_filter:
psf_files = glob.glob('{0}*psf.fits'.format(field_root))
if len(psf_files) > 0:
psf_files.sort()
psf_im = pyfits.open(psf_files[-1])
msg = '# Generate PSF kernel from {0}\n'.format(psf_files[-1])
utils.log_comment(utils.LOGFILE, msg, verbose=True)
sh = psf_im['PSF', 'DRIZ1'].data.shape
# Cut out center of PSF
skip = (sh[0]-1-11)//2
psf = psf_im['PSF', 'DRIZ1'].data[skip:-1-skip, skip:-1-skip]*1
# Optimal filter is reversed PSF (i.e., PSF cross-correlation)
# https://arxiv.org/pdf/1512.06872.pdf
psf_kernel = psf[::-1, :][:, ::-1]
psf_kernel /= psf_kernel.sum()
detection_params['filter_kernel'] = psf_kernel
tab = prep.make_SEP_catalog(root=detection_root, threshold=threshold, get_background=detection_background, save_to_fits=True, rescale_weight=rescale_weight, err_scale=det_err_scale, phot_apertures=phot_apertures, detection_params=detection_params, bkg_mask=bkg_mask, bkg_params=bkg_params, use_bkg_err=use_bkg_err, aper_segmask=aper_segmask)
cat_pixel_scale = tab.meta['asec_0'][0]/tab.meta['aper_0'][0]
else:
tab = utils.GTable.gread(master_catalog)
cat_pixel_scale = tab.meta['ASEC_0']/tab.meta['APER_0']
# Source positions
#source_xy = tab['X_IMAGE'], tab['Y_IMAGE']
if aper_segmask:
seg_data = pyfits.open('{0}_seg.fits'.format(detection_root))[0].data
seg_data = np.cast[np.int32](seg_data)
aseg, aseg_id = seg_data, tab['NUMBER']
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg, aseg_id
aseg_half = None
else:
source_xy = tab['X_WORLD'], tab['Y_WORLD']
if filters is None:
visits_file = '{0}_visits.npy'.format(field_root)
if not os.path.exists(visits_file):
get_all_filters = True
if get_all_filters:
mq = '{0}-f*dr?_sci.fits*'
mq = mq.format(field_root.replace('-100mas','-*mas'))
mosaic_files = glob.glob(mq)
mosaic_files.sort()
filters = [file.split('_')[-3][len(field_root)+1:]
for file in mosaic_files]
else:
vfile = '{0}_visits.npy'.format(field_root)
visits, all_groups, info = np.load(vfile, allow_pickle=True)
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') &
(info['DETECTOR'] == 'IR')) |
(info['FILTER'] == 'F814W')]
# UVIS
info_filters = [f for f in info['FILTER']]
for i in range(len(info)):
file_i = info['FILE'][i]
if file_i.startswith('i') & ('_flc' in file_i):
info_filters[i] += 'U'
info['FILTER'] = info_filters
filters = [f.lower() for f in np.unique(info['FILTER'])]
#filters.insert(0, 'ir')
#segment_img = pyfits.open('{0}-ir_seg.fits'.format(field_root))[0].data
fq = '{0}-{1}_dr?_sci.fits*'
for ii, filt in enumerate(filters):
print(filt)
if filt.startswith('g'):
continue
if filt not in ['g102', 'g141', 'g800l']:
sci_files = glob.glob(fq.format(field_root.replace('-100mas','-*mas'),
filt))
if len(sci_files) == 0:
continue
root = sci_files[0].split('{0}_dr'.format(filt))[0]+filt
# root = '{0}-{1}'.format(field_root, filt)
# Check for half-pixel optical images if using segmask
if aper_segmask:
sci = pyfits.open(sci_files[0])
sci_shape = sci[0].data.shape
sci.close()
del(sci)
if sci_shape[0] != aseg.shape[0]:
print('# filt={0}, need half-size segmentation image!'.format(filt), sci_shape, aseg.shape)
if aseg_half is None:
aseg_half = np.zeros(sci_shape, dtype=aseg.dtype)
for i in [0, 1]:
for j in [0, 1]:
aseg_half[i::2, j::2] += aseg
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg_half, aseg_id
else:
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg, aseg_id
filter_tab = prep.make_SEP_catalog(root=root,
threshold=threshold,
rescale_weight=rescale_weight,
err_scale=det_err_scale,
get_background=photometry_background,
save_to_fits=False, source_xy=source_xy,
phot_apertures=phot_apertures, bkg_mask=bkg_mask,
bkg_params=bkg_params, use_bkg_err=use_bkg_err)
for k in filter_tab.meta:
newk = '{0}_{1}'.format(filt.upper(), k)
tab.meta[newk] = filter_tab.meta[k]
for c in filter_tab.colnames:
newc = '{0}_{1}'.format(filt.upper(), c)
tab[newc] = filter_tab[c]
# Kron total correction from EE
filt_plam = tab.meta['{0}_PLAM'.format(filt.upper())]
tot_corr = prep.get_kron_tot_corr(tab, filt.lower(),
pixel_scale=cat_pixel_scale,
photplam=filt_plam)
#ee_corr = prep.get_kron_tot_corr(tab, filter=filt.lower())
tab['{0}_tot_corr'.format(filt.upper())] = tot_corr
else:
continue
for c in tab.colnames:
tab.rename_column(c, c.lower())
idcol = utils.GTable.Column(data=tab['number'], name='id')
tab.add_column(idcol, index=0)
tab.write('{0}_phot.fits'.format(output_root), format='fits', overwrite=True)
return tab
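# Minimal usage sketch (field name illustrative): build the detection
# catalog on the 'ir' mosaic, run matched aperture photometry in all
# available filters and write '{field_root}_phot.fits':
# >>> tab = multiband_catalog(field_root='j142724+334246', threshold=1.8,
# ...                         detection_filter='ir', run_detection=True)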
def count_grism_exposures(phot, groups, grisms=['g800l', 'g102', 'g141'], reset=True, verbose=False):
"""
Count number of grism exposures that contain objects in a catalog
"""
from matplotlib.path import Path
points = np.array([phot['ra'], phot['dec']]).T
for g in grisms:
if ('nexp_'+g not in phot.colnames) | reset:
phot['nexp_'+g] = np.zeros(len(phot), dtype=np.int32)
for ig, g in enumerate(groups):
gri = g['grism']['product'].split('-')[-1]
if gri not in grisms:
continue
if verbose:
print('{0:<4} {1:48} {2}'.format(ig, g['grism']['product'], gri))
for fp in g['grism']['footprints']:
hull = Path(np.array(fp.convex_hull.boundary.xy).T)
phot['nexp_'+gri] += hull.contains_points(points)*1
phot['has_grism'] = (phot['nexp_'+grisms[0]] > 0).astype(np.uint8)
if len(grisms) > 1:
for ig, g in enumerate(grisms):
phot['has_grism'] |= (phot['nexp_'+g] > 0).astype(np.uint8)*2**ig
phot.meta[g+'bit'] = 2**ig
return phot
def photutils_catalog(field_root='j142724+334246', threshold=1.8, subtract_bkg=True):
"""
Make a detection catalog with SExtractor and then measure
photometry with `~photutils`.
"""
from photutils import segmentation, background
import photutils.utils
import warnings
warnings.warn('photutils_catalog is deprecated, use ``sep`` catalog '
'in multiband_catalog')
try:
from .. import prep, utils
except:
from grizli import prep, utils
# Photutils catalog
#overlaps = np.load('{0}_overlaps.npy'.format(field_root))[0]
# Make catalog
sexcat = prep.make_drz_catalog(root='{0}-ir'.format(field_root), threshold=threshold, extra_config=prep.SEXTRACTOR_CONFIG_3DHST)
#sexcat = prep.make_SEP_catalog(root='{0}-ir'.format(field_root), threshold=threshold, extra_config=prep.SEXTRACTOR_CONFIG_3DHST)
for c in sexcat.colnames:
sexcat.rename_column(c, c.lower())
sexcat = sexcat['number', 'mag_auto', 'flux_radius']
files = glob.glob('../RAW/*fl[tc].fits')
info = utils.get_flt_info(files)
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')) | (info['FILTER'] == 'F814W')]
filters = [f.lower() for f in np.unique(info['FILTER'])]
filters.insert(0, 'ir')
segment_img = pyfits.open('{0}-ir_seg.fits'.format(field_root))[0].data
for ii, filt in enumerate(filters):
print(filt)
if filt.startswith('g'):
continue
if filt not in ['g102', 'g141']:
sci_files = glob.glob(('{0}-{1}_dr?_sci.fits'.format(field_root, filt)))
if len(sci_files) == 0:
continue
else:
sci_file = sci_files[0]
sci = pyfits.open(sci_file)
wht = pyfits.open(sci_file.replace('_sci', '_wht'))
else:
continue
photflam = sci[0].header['PHOTFLAM']
ABZP = (-2.5*np.log10(sci[0].header['PHOTFLAM']) - 21.10 -
5*np.log10(sci[0].header['PHOTPLAM']) + 18.6921)
bkg_err = 1/np.sqrt(wht[0].data)
bkg_err[~np.isfinite(bkg_err)] = 0 # 1e30
total_error = photutils.utils.calc_total_error(sci[0].data, bkg_err, sci[0].header['EXPTIME'])
wht_mask = (wht[0].data == 0) | (sci[0].data == 0)
sci[0].data[wht[0].data == 0] = 0
mask = None # bkg_err > 1.e29
ok = wht[0].data > 0
if ok.sum() == 0:
print(' No valid pixels')
continue
if subtract_bkg:
try:
bkg = background.Background2D(sci[0].data, 100, mask=wht_mask | (segment_img > 0), filter_size=(3, 3), filter_threshold=None, edge_method='pad')
bkg_obj = bkg.background
except:
bkg_obj = None
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! Couldn't make bkg_obj")
else:
bkg_obj = None
cat = segmentation.source_properties(sci[0].data, segment_img, error=total_error, mask=mask, background=bkg_obj, filter_kernel=None, wcs=pywcs.WCS(sci[0].header), labels=None)
if filt == 'ir':
cols = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy']
tab = utils.GTable(cat.to_table(columns=cols))
cols = ['source_sum', 'source_sum_err']
for c in cols:
tab[c.replace('sum', 'flam')] = tab[c]*photflam
else:
cols = ['source_sum', 'source_sum_err']
t_i = cat.to_table(columns=cols)
mask = (np.isfinite(t_i['source_sum_err']))
for c in cols:
tab['{0}_{1}'.format(filt, c)] = t_i[c]
tab['{0}_{1}'.format(filt, c)][~mask] = np.nan
cflam = c.replace('sum', 'flam')
tab['{0}_{1}'.format(filt, cflam)] = t_i[c]*photflam
tab['{0}_{1}'.format(filt, cflam)][~mask] = np.nan
tab.meta['PW{0}'.format(filt.upper())] = sci[0].header['PHOTPLAM']
tab.meta['ZP{0}'.format(filt.upper())] = ABZP
tab.meta['FL{0}'.format(filt.upper())] = sci[0].header['PHOTFLAM']
icrs = [(coo.ra.value, coo.dec.value) for coo in tab['sky_centroid_icrs']]
tab['ra'] = [coo[0] for coo in icrs]
tab['dec'] = [coo[1] for coo in icrs]
tab.remove_column('sky_centroid_icrs')
tab.remove_column('sky_centroid')
tab.write('{0}_phot.fits'.format(field_root), format='fits', overwrite=True)
return tab
def load_GroupFLT(field_root='j142724+334246', PREP_PATH='../Prep', force_ref=None, force_seg=None, force_cat=None, galfit=False, pad=256, files=None, gris_ref_filters=GRIS_REF_FILTERS, split_by_grism=False):
"""
Initialize a GroupFLT object
"""
import glob
import os
import numpy as np
from .. import prep, utils, multifit
if files is None:
files = glob.glob(os.path.join(PREP_PATH, '*fl[tc].fits'))
files.sort()
info = utils.get_flt_info(files)
g141 = info['FILTER'] == 'G141'
g102 = info['FILTER'] == 'G102'
g800l = info['FILTER'] == 'G800L'
if force_cat is None:
#catalog = '{0}-ir.cat.fits'.format(field_root)
catalog = glob.glob('{0}-ir.cat.fits'.format(field_root))[0]
else:
catalog = force_cat
grp_objects = []
#grp = None
if (g141.sum() > 0) & ('G141' in gris_ref_filters):
for f in gris_ref_filters['G141']:
if os.path.exists(f'{field_root}-{f.lower()}_drz_sci.fits'):
g141_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g141_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g141_ref.lower())
else:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
#seg_file = '{0}-ir_seg.fits'.format(field_root)
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g141_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g141_ref.lower())
else:
ref_file = '{0}-{1}_drz_sci.fits'.format(field_root, g141_ref.lower())
else:
ref_file = force_ref
grp = multifit.GroupFLT(grism_files=list(info['FILE'][g141]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=pad)
grp_objects.append(grp)
if (g102.sum() > 0) & ('G102' in gris_ref_filters):
for f in gris_ref_filters['G102']:
if os.path.exists('{0}-{1}_drz_sci.fits'.format(field_root, f.lower())):
g102_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g102_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g102_ref.lower())
else:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g102_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g102_ref.lower())
else:
ref_file = '{0}-{1}_drz_sci.fits'.format(field_root, g102_ref.lower())
else:
ref_file = force_ref
grp_i = multifit.GroupFLT(grism_files=list(info['FILE'][g102]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=pad)
# if g141.sum() > 0:
# grp.extend(grp_i)
# else:
# grp = grp_i
grp_objects.append(grp_i)
# del(grp_i)
# ACS
if (g800l.sum() > 0) & ('G800L' in gris_ref_filters):
acs_grp = None
for f in gris_ref_filters['G800L']:
if os.path.exists('{0}-{1}_drc_sci.fits'.format(field_root, f.lower())):
g800l_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g800l_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g800l_ref.lower())
else:
#seg_file = '{0}-ir_seg.fits'.format(field_root)
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g800l_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g800l_ref.lower())
else:
ref_file = '{0}-{1}_drc_sci.fits'.format(field_root, g800l_ref.lower())
else:
ref_file = force_ref
for sci_extn in [1, 2]:
grp_i = multifit.GroupFLT(grism_files=list(info['FILE'][g800l]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=sci_extn, pad=0, shrink_segimage=False)
if acs_grp is not None:
acs_grp.extend(grp_i)
del(grp_i)
else:
acs_grp = grp_i
if acs_grp is not None:
grp_objects.append(acs_grp)
if split_by_grism:
return grp_objects
else:
grp = grp_objects[0]
# Merge any additional groups into the first one
for grp_i in grp_objects[1:]:
grp.extend(grp_i)
del(grp_objects)
return [grp]
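# Minimal usage sketch (field name illustrative): initialize the
# contamination-model object(s) from the preprocessed FLT files:
# >>> grp_list = load_GroupFLT(field_root='j142724+334246',
# ...                          PREP_PATH='../Prep', split_by_grism=True)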
def grism_prep(field_root='j142724+334246', PREP_PATH='../Prep', EXTRACT_PATH='../Extractions', ds9=None, refine_niter=3, gris_ref_filters=GRIS_REF_FILTERS, files=None, split_by_grism=True, refine_poly_order=1, refine_fcontam=0.5, cpu_count=0, mask_mosaic_edges=True, prelim_mag_limit=25, refine_mag_limits=[18, 24], grisms_to_process=None):
"""
Contamination model for grism exposures
"""
import glob
import os
import numpy as np
import scipy.stats
try:
from .. import prep, utils, multifit
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.grism_prep')
except:
from grizli import prep, utils, multifit
if grisms_to_process is not None:
for g in gris_ref_filters.copy():
if g not in grisms_to_process:
pg = gris_ref_filters.pop(g)
grp_objects = load_GroupFLT(field_root=field_root, PREP_PATH=PREP_PATH,
gris_ref_filters=gris_ref_filters,
files=files, split_by_grism=split_by_grism)
for grp in grp_objects:
################
# Compute preliminary model
grp.compute_full_model(fit_info=None, verbose=True, store=False,
mag_limit=prelim_mag_limit, coeffs=[1.1, -0.5],
cpu_count=cpu_count)
##############
# Save model to avoid having to recompute it again
grp.save_full_data()
#############
# Mask edges of the exposures not covered by reference image
if mask_mosaic_edges:
try:
# Read the footprint file created earlier
fp_file = '{0}-ir.npy'.format(field_root)
det_poly = np.load(fp_file, allow_pickle=True)[0]['footprint']
for flt in grp.FLTs:
flt.mask_mosaic_edges(sky_poly=det_poly, verbose=True,
dq_mask=False, dq_value=1024,
err_scale=10, resid_sn=-1)
except:
pass
################
# Remove constant modal background
for i in range(grp.N):
mask = (grp.FLTs[i].model < grp.FLTs[i].grism['ERR']*0.6)
mask &= (grp.FLTs[i].grism['SCI'] != 0)
# Fit Gaussian to the masked pixel distribution
clip = np.ones(mask.sum(), dtype=bool)
for iter in range(3):
clip_data = grp.FLTs[i].grism.data['SCI'][mask][clip]
n = scipy.stats.norm.fit(clip_data)
clip = np.abs(grp.FLTs[i].grism.data['SCI'][mask]) < 3*n[1]
del(clip_data)
mode = n[0]
logstr = '# grism_mode_bg {0} {1} {2:.4f}'
logstr = logstr.format(grp.FLTs[i].grism.parent_file,
grp.FLTs[i].grism.filter, mode)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
try:
ds9.view(grp.FLTs[i].grism['SCI'] - grp.FLTs[i].model)
except:
pass
# Subtract
grp.FLTs[i].grism.data['SCI'] -= mode
#############
# Refine the model
i = 0
if ds9:
ds9.view(grp.FLTs[i].grism['SCI'] - grp.FLTs[i].model)
fr = ds9.get('frame')
utils.log_comment(utils.LOGFILE, '# Refine contamination',
verbose=True, show_date=True)
for iter in range(refine_niter):
print('\nRefine contamination model, iter # {0}\n'.format(iter))
if ds9:
ds9.set('frame {0}'.format(int(fr)+iter+1))
if (iter == 0) & (refine_niter > 0):
refine_i = 1
else:
refine_i = refine_fcontam
grp.refine_list(poly_order=refine_poly_order,
mag_limits=refine_mag_limits,
max_coeff=5, ds9=ds9, verbose=True,
fcontam=refine_i)
##############
# Save model to avoid having to recompute it again
grp.save_full_data()
# Link minimal files to Extractions directory
os.chdir(EXTRACT_PATH)
os.system(f'ln -s {PREP_PATH}/*GrismFLT* .')
os.system(f'ln -s {PREP_PATH}/*_fl*wcs.fits .')
os.system(f'ln -s {PREP_PATH}/{field_root}-*.cat.fits .')
os.system(f'ln -s {PREP_PATH}/{field_root}-*seg.fits .')
os.system(f'ln -s {PREP_PATH}/*_phot.fits .')
return grp
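# Minimal usage sketch (field name and paths illustrative): compute and
# refine the contamination model, then link the products into the
# Extractions directory:
# >>> grp = grism_prep(field_root='j142724+334246', PREP_PATH='../Prep',
# ...                  EXTRACT_PATH='../Extractions', refine_niter=3)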
DITHERED_PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
PARALLEL_PLINE = {'kernel': 'square', 'pixfrac': 1.0, 'pixscale': 0.1, 'size': 8, 'wcs': None}
def refine_model_with_fits(field_root='j142724+334246', grp=None, master_files=None, spectrum='continuum', clean=True, max_chinu=5):
"""
Refine the full-field grism models with the best fit spectra from
individual extractions.
"""
import glob
import traceback
try:
from .. import multifit
except:
from grizli import multifit
if grp is None:
if master_files is None:
master_files = glob.glob('*GrismFLT.fits')
master_files.sort()
catalog = glob.glob(f'{field_root}-*.cat.fits')[0]
try:
seg_file = glob.glob(f'{field_root}-*_seg.fits')[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=master_files, direct_files=[], ref_file=None, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=256)
fit_files = glob.glob('*full.fits')
fit_files.sort()
N = len(fit_files)
if N == 0:
return False
msg = 'Refine model ({0}/{1}): {2} / skip (chinu={3:.1f}, dof={4})'
for i, file in enumerate(fit_files):
try:
hdu = pyfits.open(file)
id = hdu[0].header['ID']
fith = hdu['ZFIT_STACK'].header
chinu = fith['CHIMIN']/fith['DOF']
if (chinu > max_chinu) | (fith['DOF'] < 10):
print(msg.format(i, N, file, chinu, fith['DOF']))
continue
sp = utils.GTable(hdu['TEMPL'].data)
dt = float  # np.float alias removed in recent numpy versions
wave = np.cast[dt](sp['wave']) # .byteswap()
flux = np.cast[dt](sp[spectrum]) # .byteswap()
grp.compute_single_model(int(id), mag=19, size=-1, store=False,
spectrum_1d=[wave, flux], is_cgs=True,
get_beams=None, in_place=True)
print('Refine model ({0}/{1}): {2}'.format(i, N, file))
except:
print('Refine model ({0}/{1}): {2} / failed'.format(i, N, file))
grp.save_full_data()
if clean:
print('# refine_model_with_fits: cleanup')
files = glob.glob('*_grism_*fits')
files += glob.glob('*beams.fits')
files += glob.glob('*stack.fits')
files += glob.glob('*stack.png')
files += glob.glob('*full.fits')
for file in files:
os.remove(file)
del(grp)
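# Minimal usage sketch (field name illustrative): fold the best-fit
# continua of existing *full.fits extractions back into the
# contamination model:
# >>> refine_model_with_fits(field_root='j142724+334246',
# ...                        spectrum='continuum', clean=False,
# ...                        max_chinu=5)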
def extract(field_root='j142724+334246', maglim=[13, 24], prior=None, MW_EBV=0.00, ids=[], pline=DITHERED_PLINE, fit_only_beams=True, run_fit=True, poly_order=7, oned_R=30, master_files=None, grp=None, bad_pa_threshold=None, fit_trace_shift=False, size=32, diff=True, min_sens=0.02, fcontam=0.2, min_mask=0.01, sys_err=0.03, skip_complete=True, fit_args={}, args_file='fit_args.npy', get_only_beams=False):
"""
Extract 2D beam cutouts and stacked spectra for objects in the field
catalog and optionally run the redshift fits.
"""
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
#import grizli
try:
from .. import multifit, prep, utils, fitting
except:
from grizli import multifit, prep, utils, fitting
if master_files is None:
master_files = glob.glob('*GrismFLT.fits')
master_files.sort()
if grp is None:
init_grp = True
catalog = glob.glob('{0}-*.cat.fits'.format(field_root))[0]
try:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=master_files, direct_files=[], ref_file=None, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=256)
else:
init_grp = False
###############
# Photometry
target = field_root
try:
file_args = np.load(args_file, allow_pickle=True)[0]
MW_EBV = file_args['MW_EBV']
min_sens = file_args['min_sens']
min_mask = file_args['min_mask']
fcontam = file_args['fcontam']
sys_err = file_args['sys_err']
pline = file_args['pline']
fit_args = file_args
fit_args.pop('kwargs')
except:
pass
if get_only_beams:
beams = grp.get_beams(ids, size=size, beam_id='A', min_sens=min_sens)
if init_grp:
del(grp)
return(beams)
###########
# IDs to extract
# ids=[1096]
if ids == []:
clip = (grp.catalog['MAG_AUTO'] > maglim[0]) & (grp.catalog['MAG_AUTO'] < maglim[1])
so = np.argsort(grp.catalog['MAG_AUTO'][clip])
ids = grp.catalog['NUMBER'][clip][so]
else:
ids = [int(id) for id in ids]
# Stack the different beams
# Use "binning" templates for standardized extraction
if oned_R:
bin_steps, step_templ = utils.step_templates(wlim=[5000, 18000.0],
R=oned_R, round=10)
init_templates = step_templ
else:
# Polynomial templates
wave = np.linspace(2000, 2.5e4, 100)
poly_templ = utils.polynomial_templates(wave, order=poly_order)
init_templates = poly_templ
#size = 32
close = True
show_beams = True
if __name__ == '__main__': # Interactive
size = 32
close = Skip = False
pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
prior = None
skip_complete = True
fit_trace_shift = False
bad_pa_threshold = 1.6
MW_EBV = 0
###############
# Stacked spectra
for ii, id in enumerate(ids):
if skip_complete:
if os.path.exists('{0}_{1:05d}.stack.png'.format(target, id)):
continue
beams = grp.get_beams(id, size=size, beam_id='A', min_sens=min_sens)
for i in range(len(beams))[::-1]:
if beams[i].fit_mask.sum() < 10:
beams.pop(i)
print('{0}/{1}: {2} {3}'.format(ii, len(ids), id, len(beams)))
if len(beams) < 1:
continue
#mb = multifit.MultiBeam(beams, fcontam=fcontam, group_name=target, psf=False, MW_EBV=MW_EBV, min_sens=min_sens)
mb = multifit.MultiBeam(beams, fcontam=fcontam, group_name=target, psf=False, MW_EBV=MW_EBV, sys_err=sys_err, min_mask=min_mask, min_sens=min_sens)
if bad_pa_threshold is not None:
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
print('\n Has bad PA! Final list: {0}\n{1}'.format(keep_dict, fit_log))
ixi = grp.catalog['NUMBER'] == id
if (fit_trace_shift > 0) & (grp.catalog['MAG_AUTO'][ixi][0] < 24.5):
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
print(' Fit trace shift: \n')
try:
shift = mb.fit_trace_shift(tol=1.e-3, verbose=True, split_groups=True, lm=True)
except:
pass
try:
tfit = mb.template_at_z(z=0, templates=init_templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
except:
tfit = None
try:
fig1 = mb.oned_figure(figsize=[5, 3], tfit=tfit, show_beams=show_beams, scale_on_stacked=True, ylim_percentile=5)
if oned_R:
outroot = '{0}_{1:05d}.R{2:.0f}'.format(target, id, oned_R)
hdu = mb.oned_spectrum_to_hdu(outputfile=outroot+'.fits',
tfit=tfit, wave=bin_steps)
else:
outroot = '{0}_{1:05d}.1D'.format(target, id)
hdu = mb.oned_spectrum_to_hdu(outputfile=outroot+'.fits',
tfit=tfit)
fig1.savefig(outroot+'.png')
except:
continue
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32, tfit=tfit, diff=diff)
fig.savefig('{0}_{1:05d}.stack.png'.format(target, id))
hdu.writeto('{0}_{1:05d}.stack.fits'.format(target, id),
overwrite=True)
mb.write_master_fits()
if False:
# Fit here for AWS...
fitting.run_all_parallel(id, verbose=True)
if close:
plt.close(fig)
plt.close(fig1)
del(hdu)
del(mb)
for k in range(100000):
plt.close()
if not run_fit:
if init_grp:
return grp
else:
return True
for ii, id in enumerate(ids):
print('{0}/{1}: {2}'.format(ii, len(ids), id))
if not os.path.exists('{0}_{1:05d}.beams.fits'.format(target, id)):
continue
if skip_complete:
if os.path.exists('{0}_{1:05d}.line.png'.format(target, id)):
continue
try:
out = fitting.run_all_parallel(id, get_output_data=True, **fit_args, args_file=args_file)
mb, st, fit, tfit, line_hdu = out
spectrum_1d = [tfit['cont1d'].wave, tfit['cont1d'].flux]
grp.compute_single_model(id, mag=-99, size=-1, store=False, spectrum_1d=spectrum_1d, get_beams=None, in_place=True, is_cgs=True)
if close:
for k in range(1000):
plt.close()
del(out)
except:
pass
# Re-save data with updated models
if init_grp:
grp.save_full_data()
return grp
else:
return True
def generate_fit_params(field_root='j142724+334246', fitter=['nnls', 'bounded'], prior=None, MW_EBV=0.00, pline=DITHERED_PLINE, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=0.01, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.6], dz=[0.004, 0.0004], fwhm=1000, lorentz=False, include_photometry=True, use_phot_obj=False, save_file='fit_args.npy', fit_trace_shift=False, **kwargs):
"""
Generate a parameter dictionary for passing to the fitting script
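Examples
--------
Illustrative call; the field rootname and redshift grid are placeholders and
the saved file is read back later by the fitting steps:
>>> args = generate_fit_params(field_root='j142724+334246', zr=[0.05, 3.6], dz=[0.004, 0.0004], save_file='fit_args.npy')  # doctest: +SKIP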
"""
import numpy as np
from grizli import utils, fitting
from . import photoz
phot = None
t0 = utils.load_templates(fwhm=fwhm, line_complexes=True, stars=False, full_line_list=None, continuum_list=None, fsps_templates=fsps, alf_template=True, lorentz=lorentz)
t1 = utils.load_templates(fwhm=fwhm, line_complexes=False, stars=False, full_line_list=None, continuum_list=None, fsps_templates=fsps, alf_template=True, lorentz=lorentz)
args = fitting.run_all(0, t0=t0, t1=t1, fwhm=1200, zr=zr, dz=dz, fitter=fitter, group_name=field_root, fit_stacks=False, prior=prior, fcontam=fcontam, pline=pline, min_sens=min_sens, mask_sn_limit=np.inf, fit_beams=False, root=field_root, fit_trace_shift=fit_trace_shift, phot=phot, use_phot_obj=use_phot_obj, verbose=True, scale_photometry=False, show_beams=True, overlap_threshold=10, get_ir_psfs=True, fit_only_beams=fit_only_beams, MW_EBV=MW_EBV, sys_err=sys_err, get_dict=True)
# EAZY-py photometry object from HST photometry
try:
import eazy.photoz
HAS_EAZY = True
except:
HAS_EAZY = False
if include_photometry & HAS_EAZY:
aper_ix = include_photometry*1
utils.set_warnings()
total_flux = 'flux_auto'
obj = photoz.eazy_photoz(field_root, object_only=True,
apply_prior=False, beta_prior=True, aper_ix=aper_ix-1,
force=True,
get_external_photometry=False, compute_residuals=False,
total_flux=total_flux)
cat = obj.cat
#apcorr = cat['flux_iso']/(cat['flux_auto']*cat['tot_corr'])
apcorr = None
phot_obj = photoz.EazyPhot(obj, grizli_templates=t0,
source_text='grizli_HST_photometry',
apcorr=apcorr,
include_photometry=True, include_pz=False)
args['phot_obj'] = phot_obj
args['scale_photometry'] = True
np.save(save_file, [args])
print('Saved arguments to {0}.'.format(save_file))
return args
def summary_catalog(**kwargs):
from . import summary
res = summary.summary_catalog(**kwargs)
return res
def fine_alignment(field_root='j142724+334246', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', min_overlap=0.2, stopme=False, ref_err=1.e-3, radec=None, redrizzle=True, shift_only=True, maglim=[17, 24], NITER=1, catalogs=['GAIA', 'PS1', 'NSC', 'SDSS', 'WISE'], method='Powell', radius=5., program_str=None, match_str=[], all_visits=None, date=None, gaia_by_date=False, tol=None, fit_options=None, print_options={'precision': 3, 'sign': ' '}, include_internal_matches=True):
"""
Try fine alignment from visit-based SExtractor catalogs
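Examples
--------
Illustrative call; the rootname and reference catalog list are placeholders:
>>> tab, fit, visits = fine_alignment(field_root='j142724+334246', catalogs=['GAIA'], NITER=1, shift_only=True)  # doctest: +SKIP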
"""
import os
import glob
import time
try:
from .. import prep, utils
from ..prep import get_radec_catalog
from ..utils import transform_wcs
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.fine_alignment')
except:
from grizli import prep, utils
from grizli.prep import get_radec_catalog
from grizli.utils import transform_wcs
import numpy as np
np.set_printoptions(**print_options)
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from drizzlepac import updatehdr
import astropy.units as u
from scipy.optimize import minimize, fmin_powell
import copy
if all_visits is None:
_ = np.load(f'{field_root}_visits.npy', allow_pickle=True)
all_visits, all_groups, info = _
failed_list = glob.glob('*failed')
visits = []
files = []
for visit in all_visits:
file = '{0}.cat.fits'.format(visit['product'])
if visit['product']+'.failed' in failed_list:
continue
if os.path.exists(file):
if program_str is not None:
prog = visit['product'].split('-')[-4]
if prog != program_str:
continue
if len(match_str) > 0:
has_match = False
for m in match_str:
has_match |= m in visit['product']
if not has_match:
continue
visits.append(visit)
files.append(file)
if radec is None:
ra_i, dec_i = np.median(info['RA_TARG']), np.median(info['DEC_TARG'])
print('Center coordinate: ', ra_i, dec_i)
if date is not None:
radec, ref_catalog = get_radec_catalog(ra=ra_i, dec=dec_i,
product=field_root, date=date,
reference_catalogs=catalogs, radius=radius)
else:
radec, ref_catalog = get_radec_catalog(ra=ra_i, dec=dec_i,
product=field_root,
reference_catalogs=catalogs, radius=radius)
#ref = 'j152643+164738_sdss.radec'
ref_tab = utils.GTable(np.loadtxt(radec, unpack=True).T,
names=['ra', 'dec'])
ridx = np.arange(len(ref_tab))
# Find matches
tab = {}
for i, file in enumerate(files):
tab[i] = {}
t_i = utils.GTable.gread(file)
mclip = (t_i['MAG_AUTO'] > maglim[0]) & (t_i['MAG_AUTO'] < maglim[1])
if mclip.sum() == 0:
continue
tab[i]['cat'] = t_i[mclip]
try:
sci_file = glob.glob(file.replace('.cat', '_dr?_sci'))[0]
except:
sci_file = glob.glob(file.replace('.cat', '_wcs'))[0]
im = pyfits.open(sci_file)
tab[i]['wcs'] = pywcs.WCS(im[0].header)
tab[i]['transform'] = [0, 0, 0, 1]
tab[i]['xy'] = np.array([tab[i]['cat']['X_IMAGE'], tab[i]['cat']['Y_IMAGE']]).T
tab[i]['match_idx'] = {}
if gaia_by_date:
drz_file = glob.glob(file.replace('.cat.fits', '*dr?_sci.fits'))[0]
drz_im = pyfits.open(drz_file)
radec, ref_catalog = get_radec_catalog(ra=drz_im[0].header['CRVAL1'],
dec=drz_im[0].header['CRVAL2'],
product='-'.join(file.split('-')[:-1]), date=drz_im[0].header['EXPSTART'], date_format='mjd',
reference_catalogs=['GAIA'], radius=radius)
ref_tab = utils.GTable(np.loadtxt(radec, unpack=True).T, names=['ra', 'dec'])
ridx = np.arange(len(ref_tab))
tab[i]['ref_tab'] = ref_tab
idx, dr = tab[i]['cat'].match_to_catalog_sky(ref_tab)
clip = dr < 0.6*u.arcsec
if clip.sum() > 1:
tab[i]['match_idx'][-1] = [idx[clip], ridx[clip]]
print('{0} Ncat={1} Nref={2}'.format(sci_file, mclip.sum(), clip.sum()))
# ix, jx = tab[i]['match_idx'][-1]
# ci = tab[i]['cat']#[ix]
# cj = ref_tab#[jx]
if include_internal_matches:
for i, file in enumerate(files):
for j in range(i+1, len(files)):
sidx = np.arange(len(tab[j]['cat']))
idx, dr = tab[i]['cat'].match_to_catalog_sky(tab[j]['cat'])
clip = dr < 0.3*u.arcsec
print(file, files[j], clip.sum())
if clip.sum() < 5:
continue
if clip.sum() > 0:
tab[i]['match_idx'][j] = [idx[clip], sidx[clip]]
#ref_err = 0.01
# shift_only=True
if shift_only > 0:
# Shift only
p0 = np.vstack([[0, 0] for i in tab])
pscl = np.array([10., 10.])
elif shift_only < 0:
# Shift + rot + scale
p0 = np.vstack([[0, 0, 0, 1] for i in tab])
pscl = np.array([10., 10., 100., 100.])
else:
# Shift + rot
p0 = np.vstack([[0, 0, 0] for i in tab])
pscl = np.array([10., 10., 100.])
#ref_err = 0.06
if False:
field_args = (tab, ref_tab, ref_err, shift_only, 'field')
_objfun_align(p0*10., *field_args)
fit_args = (tab, ref_tab, ref_err, shift_only, 'huber')
plot_args = (tab, ref_tab, ref_err, shift_only, 'plot')
plotx_args = (tab, ref_tab, ref_err, shift_only, 'plotx')
pi = p0*1. # *10.
for iter in range(NITER):
fit = minimize(_objfun_align, pi*pscl, args=fit_args, method=method, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=tol, callback=None, options=fit_options)
pi = fit.x.reshape((-1, len(pscl)))/pscl
########
# Show the result
fig = plt.figure(figsize=[8, 8])
ax = fig.add_subplot(221)
_objfun_align(p0*pscl, *plot_args)
ax.set_xticklabels([])
ax.set_ylabel('dDec')
ax = fig.add_subplot(223)
_objfun_align(p0*pscl, *plotx_args)
ax.set_ylabel('dDec')
ax.set_xlabel('dRA')
ax = fig.add_subplot(222)
_objfun_align(fit.x, *plot_args)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax = fig.add_subplot(224)
_objfun_align(fit.x, *plotx_args)
ax.set_yticklabels([])
ax.set_xlabel('dRA')
for ax in fig.axes:
ax.grid()
ax.set_xlim(-0.35, 0.35)
ax.set_ylim(-0.35, 0.35)
fig.tight_layout(pad=0.5)
extra_str = ''
if program_str:
extra_str += '.{0}'.format(program_str)
if match_str:
extra_str += '.{0}'.format('.'.join(match_str))
fig.text(0.97, 0.02, time.ctime(), ha='right', va='bottom', fontsize=5, transform=fig.transFigure)
fig.savefig('{0}{1}_fine.png'.format(field_root, extra_str))
np.save('{0}{1}_fine.npy'.format(field_root, extra_str), [visits, fit])
return tab, fit, visits
def update_wcs_headers_with_fine(field_root, backup=True):
"""
Update direct and grism exposure WCS headers with the fine alignment shifts
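Examples
--------
Illustrative call, assuming '{field_root}*fine.npy' was produced by
`fine_alignment`:
>>> update_wcs_headers_with_fine('j142724+334246', backup=True)  # doctest: +SKIP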
"""
import os
import numpy as np
import glob
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from drizzlepac import updatehdr
#import grizli.prep
try:
from .. import prep
except:
from grizli import prep
if backup:
if not os.path.exists('FineBkup'):
os.mkdir('FineBkup')
visits, all_groups, info = np.load(f'{field_root}_visits.npy',
allow_pickle=True)
fit_files = glob.glob('{0}*fine.npy'.format(field_root))
for fit_file in fit_files:
fine_visits, fine_fit = np.load(fit_file, allow_pickle=True)
N = len(fine_visits)
if backup:
for i in range(N):
direct = fine_visits[i]
for file in direct['files']:
os.system(f'cp {file} FineBkup/')
print(file)
trans = np.reshape(fine_fit.x, (N, -1)) # /10.
sh = trans.shape
if sh[1] == 2:
pscl = np.array([10., 10.])
trans = np.hstack([trans/pscl, np.zeros((N, 1)), np.ones((N, 1))])
elif sh[1] == 3:
pscl = np.array([10., 10., 100])
trans = np.hstack([trans/pscl, np.ones((N, 1))])
elif sh[1] == 4:
pscl = np.array([10., 10., 100, 100])
trans = trans/pscl
# Update direct WCS
for ix, direct in enumerate(fine_visits):
#direct = visits[ix]
out_shift, out_rot = trans[ix, :2], trans[ix, 2]
out_scale = trans[ix, 3]
xyscale = trans[ix, :4]
xyscale[2] *= -1
out_rot *= -1
try:
wcs_ref_file = str('{0}.cat.fits'.format(direct['product']))
wcs_ref = pywcs.WCS(pyfits.open(wcs_ref_file)['WCS'].header,
relax=True)
except:
wcs_ref_file = str('{0}_wcs.fits'.format(direct['product']))
wcs_ref = pywcs.WCS(pyfits.open(wcs_ref_file)[0].header,
relax=True)
for file in direct['files']:
prep.update_wcs_fits_log(file, wcs_ref,
xyscale=xyscale,
initialize=False,
replace=('.fits', '.wcslog.fits'),
wcsname='FINE')
updatehdr.updatewcs_with_shift(file,
wcs_ref_file,
xsh=out_shift[0], ysh=out_shift[1],
rot=out_rot, scale=out_scale,
wcsname='FINE', force=True,
reusename=True, verbose=True,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
# Update grism WCS
for i in range(len(all_groups)):
direct = all_groups[i]['direct']
grism = all_groups[i]['grism']
for j in range(N):
if fine_visits[j]['product'] == direct['product']:
print(direct['product'], grism['product'], trans[j, :])
if backup:
for file in grism['files']:
os.system(f'cp {file} FineBkup/')
print(file)
prep.match_direct_grism_wcs(direct=direct, grism=grism,
get_fresh_flt=False,
xyscale=trans[j, :])
def make_reference_wcs(info, files=None, output='mosaic_wcs-ref.fits', filters=['G800L', 'G102', 'G141'], pad_reference=90, pixel_scale=None, get_hdu=True):
"""
Make a reference image WCS based on the grism exposures
Parameters
----------
info : `~astropy.table.Table`
Exposure information table with columns 'FILE' and 'FILTER'.
output : str, None
Filename for the output WCS reference image.
filters : list or None
List of filters to consider for the output mosaic. If None, then
use all exposures in the `info` list.
pad_reference : float
Image padding, in `~astropy.units.arcsec`.
pixel_scale : None or float
Pixel scale in `~astropy.units.arcsec`. If None, the pixel scale is
determined automatically (0.03" if only ACS, otherwise 0.06").
get_hdu : bool
If True, then generate an `~astropy.io.fits.ImageHDU` object and
save to a file if `output` is defined. If False, return just the
computed `~astropy.wcs.WCS`.
Returns
-------
`~astropy.io.fits.ImageHDU` or `~astropy.wcs.WCS`, see `get_hdu`.
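Examples
--------
Illustrative call, where `info` is the exposure table loaded from the
'{field_root}_visits.npy' file:
>>> ref_hdu = make_reference_wcs(info, output='mosaic_wcs-ref.fits', filters=['G102', 'G141'], get_hdu=True)  # doctest: +SKIP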
"""
if filters is not None:
use = utils.column_values_in_list(info['FILTER'], filters)
if use.sum() == 0:
# All files
files = info['FILE']
else:
files = info['FILE'][use]
else:
files = info['FILE']
# Just ACS, pixel scale 0.03
if pixel_scale is None:
# Auto determine pixel size, 0.03" pixels if only ACS, otherwise 0.06
any_grism = utils.column_values_in_list(info['FILTER'],
['G800L', 'G102', 'G141'])
acs_grism = (info['FILTER'] == 'G800L')
only_acs = list(np.unique(info['INSTRUME'])) == ['ACS']
if ((acs_grism.sum() == any_grism.sum()) & (any_grism.sum() > 0)) | (only_acs):
pixel_scale = 0.03
else:
pixel_scale = 0.06
ref_hdu = utils.make_maximal_wcs(files, pixel_scale=pixel_scale,
get_hdu=get_hdu, pad=pad_reference,
verbose=True)
if get_hdu:
ref_hdu.data = ref_hdu.data.astype(np.int16)
if output is not None:
ref_hdu.writeto(output, overwrite=True, output_verify='fix')
return ref_hdu
else:
return ref_hdu[1]
def drizzle_overlaps(field_root, filters=['F098M', 'F105W', 'F110W', 'F125W', 'F140W', 'F160W'], ref_image=None, ref_wcs=None, bits=None, pixfrac=0.75, scale=0.06, make_combined=False, drizzle_filters=True, skysub=False, skymethod='localmin', match_str=[], context=False, pad_reference=60, min_nexp=2, static=True, skip_products=[], include_saturated=False, multi_driz_cr=False, filter_driz_cr=False, **kwargs):
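"""
Drizzle filter-level mosaics (and optionally a combined mosaic) for a field
from its overlapping visits, as recorded in '{field_root}_visits.npy'.
Examples
--------
Illustrative call; the rootname and filter list are placeholders:
>>> drizzle_overlaps('j142724+334246', filters=['F125W', 'F160W'], scale=0.06, make_combined=False)  # doctest: +SKIP
"""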
import numpy as np
import glob
try:
from .. import prep, utils
except:
from grizli import prep
##############
# Redrizzle
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
failed_list = glob.glob('*failed')
#overlaps = np.load('{0}_overlaps.npy'.format(field_root))[0]
#keep = []
if make_combined:
if isinstance(make_combined, str):
label = make_combined
else:
label = 'ir'
else:
label = 'ir'
wfc3ir = {'product': '{0}-{1}'.format(field_root, label), 'files': []}
if ref_image is not None:
wfc3ir['reference'] = ref_image
if ref_wcs is not None:
wfc3ir['reference_wcs'] = ref_wcs
filter_groups = {}
for visit in visits:
# Visit failed for some reason
if (visit['product']+'.wcs_failed' in failed_list) | (visit['product']+'.failed' in failed_list) | (visit['product'] in skip_products):
continue
# Too few exposures (i.e., one with unreliable CR flags)
if len(visit['files']) < min_nexp:
continue
# Not one of the desired filters
filt = visit['product'].split('-')[-1]
if filt.upper() not in filters:
continue
# Are all of the exposures in ./?
has_exposures = True
for file in visit['files']:
has_exposures &= os.path.exists('../Prep/'+file)
if not has_exposures:
print('Visit {0} missing exposures, skip'.format(visit['product']))
continue
# IS UVIS?
if visit['files'][0].startswith('i') & ('_flc' in visit['files'][0]):
filt += 'u'
is_uvis = True
else:
is_uvis = False
if len(match_str) > 0:
has_match = False
for m in match_str:
has_match |= m in visit['product']
if not has_match:
continue
if filt not in filter_groups:
filter_groups[filt] = {'product': '{0}-{1}'.format(field_root, filt), 'files': [], 'reference': ref_image, 'reference_wcs': ref_wcs}
filter_groups[filt]['files'].extend(visit['files'])
# Add polygon
if 'footprints' in visit:
for fp in visit['footprints']:
if 'footprint' in filter_groups[filt]:
filter_groups[filt]['footprint'] = filter_groups[filt]['footprint'].union(fp)
else:
filter_groups[filt]['footprint'] = fp.buffer(0)
if (filt.upper() in filters) | (is_uvis & (filt.upper()[:-1] in filters)):
wfc3ir['files'].extend(visit['files'])
if 'footprint' in filter_groups[filt]:
fp_i = filter_groups[filt]['footprint']
if 'footprint' in wfc3ir:
wfc3ir['footprint'] = wfc3ir['footprint'].union(fp_i)
else:
wfc3ir['footprint'] = fp_i.buffer(0)
if len(filter_groups) == 0:
print('No filters found ({0})'.format(filters))
return None
keep = [filter_groups[k] for k in filter_groups]
if (ref_image is None) & (ref_wcs is None):
print('\nCompute mosaic WCS: {0}_wcs-ref.fits\n'.format(field_root))
ref_hdu = utils.make_maximal_wcs(wfc3ir['files'], pixel_scale=scale, get_hdu=True, pad=pad_reference, verbose=True)
ref_hdu.writeto('{0}_wcs-ref.fits'.format(field_root), overwrite=True,
output_verify='fix')
wfc3ir['reference'] = '{0}_wcs-ref.fits'.format(field_root)
for i in range(len(keep)):
keep[i]['reference'] = '{0}_wcs-ref.fits'.format(field_root)
if ref_wcs is not None:
pass
#
if make_combined:
# Figure out if we have more than one instrument
inst_keys = np.unique([os.path.basename(file)[0] for file in wfc3ir['files']])
prep.drizzle_overlaps([wfc3ir], parse_visits=False, pixfrac=pixfrac, scale=scale, skysub=False, bits=bits, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False, context=context, static=(static & (len(inst_keys) == 1)), include_saturated=include_saturated, run_driz_cr=multi_driz_cr, **kwargs)
np.save('{0}.npy'.format(wfc3ir['product']), [wfc3ir])
if drizzle_filters:
print('Drizzle mosaics in filters: {0}'.format(filter_groups.keys()))
prep.drizzle_overlaps(keep, parse_visits=False, pixfrac=pixfrac, scale=scale, skysub=skysub, skymethod=skymethod, bits=bits, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False, context=context, static=static, include_saturated=include_saturated, run_driz_cr=filter_driz_cr, **kwargs)
FILTER_COMBINATIONS = {'ir': IR_M_FILTERS+IR_W_FILTERS,
'opt': OPT_M_FILTERS+OPT_W_FILTERS}
def make_filter_combinations(root, weight_fnu=True, filter_combinations=FILTER_COMBINATIONS, min_count=1):
"""
Combine ir/opt mosaics, scaling each filter to a common reference zeropoint (F814W for optical, F140W for IR)
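Examples
--------
Illustrative call; the rootname and filter grouping are placeholders:
>>> make_filter_combinations('j142724+334246', filter_combinations={'ir': ['F125W', 'F140W', 'F160W']})  # doctest: +SKIP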
"""
# Output normalization to F814W / F140W
ref_h = {}
ref_h['opt'] = {'INSTRUME': 'ACS', 'DETECTOR': 'WFC',
'PHOTFLAM': 7.0178627203125e-20,
'PHOTBW': 653.24393453125, 'PHOTZPT': -21.1,
'PHOTMODE': 'ACS WFC1 F814W MJD#56438.5725',
'PHOTPLAM': 8045.415190625002,
'FILTER1': 'CLEAR1L', 'FILTER2': 'F814W'}
ref_h['ir'] = {'INSTRUME': 'WFC3', 'DETECTOR': 'IR',
'PHOTFNU': 9.5291135e-08,
'PHOTFLAM': 1.4737148e-20,
'PHOTBW': 1132.39, 'PHOTZPT': -21.1,
'PHOTMODE': 'WFC3 IR F140W',
'PHOTPLAM': 13922.907, 'FILTER': 'F140W'}
####
count = {}
num = {}
den = {}
for f in filter_combinations:
num[f] = None
den[f] = None
count[f] = 0
output_sci = {}
head = {}
sci_files = glob.glob('{0}-f*sci.fits*'.format(root))
for sci_file in sci_files:
filt_i = sci_file.split('_dr')[0].split('-')[-1]
filt_ix = sci_file.split('_dr')[0].split('-')[-1]
# UVIS
if filt_i.startswith('f') & filt_i.endswith('u'):
filt_i = filt_i[:-1]
band = None
for f in filter_combinations:
if filt_i.upper() in filter_combinations[f]:
band = f
break
if band is None:
continue
# Which reference parameters to use?
if filt_i.upper() in OPT_W_FILTERS + OPT_M_FILTERS:
ref_h_i = ref_h['opt']
else:
ref_h_i = ref_h['ir']
print(sci_file, filt_i, band)
output_sci[band] = sci_file.replace(filt_ix, band)
im_i = pyfits.open(sci_file)
wht_i = pyfits.open(sci_file.replace('_sci', '_wht'))
photflam = im_i[0].header['PHOTFLAM']
ref_photflam = ref_h_i['PHOTFLAM']
photplam = im_i[0].header['PHOTPLAM']
ref_photplam = ref_h_i['PHOTPLAM']
head[band] = im_i[0].header.copy()
for k in ref_h_i:
head[band][k] = ref_h_i[k]
if num[band] is None:
num[band] = im_i[0].data*0
den[band] = num[band]*0
scl = photflam/ref_photflam
if weight_fnu:
scl_weight = photplam**2/ref_photplam**2
else:
scl_weight = 1.
den_i = wht_i[0].data/scl**2*scl_weight
num[band] += im_i[0].data*scl*den_i
den[band] += den_i
count[band] += 1
# Done, make outputs
for band in filter_combinations:
if (num[band] is not None) & (count[band] >= min_count):
sci = num[band]/den[band]
wht = den[band]
mask = (~np.isfinite(sci)) | (den[band] == 0)
sci[mask] = 0
wht[mask] = 0
print('Write {0}'.format(output_sci[band]))
pyfits.PrimaryHDU(data=sci, header=head[band]).writeto(output_sci[band], overwrite=True, output_verify='fix')
pyfits.PrimaryHDU(data=wht, header=head[band]).writeto(output_sci[band].replace('_sci', '_wht'), overwrite=True, output_verify='fix')
def make_combined_mosaics(root, fix_stars=False, mask_spikes=False, skip_single_optical_visits=True, mosaic_args=args['mosaic_args'], mosaic_driz_cr_type=0, mosaic_drizzle_args=args['mosaic_drizzle_args'], **kwargs):
"""
Drizzle combined mosaics
mosaic_driz_cr_type : int
(mosaic_driz_cr_type & 1) : flag CRs on all IR combined
(mosaic_driz_cr_type & 2) : flag CRs on IR filter combinations
(mosaic_driz_cr_type & 4) : flag CRs on all OPT combined
(mosaic_driz_cr_type & 8) : flag CRs on OPT filter combinations
"""
# if False:
# # j = 125+110w
# auto_script.field_rgb('j013804m2156', HOME_PATH=None, show_ir=True, filters=['f160w','j','f105w'], xsize=16, rgb_scl=[1, 0.85, 1], rgb_min=-0.003)
visits_file = '{0}_visits.npy'.format(root)
visits, groups, info = np.load(visits_file, allow_pickle=True)
# Mosaic WCS
wcs_ref_file = '{0}_wcs-ref.fits'.format(root)
if not os.path.exists(wcs_ref_file):
make_reference_wcs(info, output=wcs_ref_file, get_hdu=True,
**mosaic_args['wcs_params'])
mosaic_pixfrac = mosaic_args['mosaic_pixfrac']
combine_all_filters = mosaic_args['combine_all_filters']
# # Combine all available filters?
# if combine_all_filters:
# all_filters = mosaic_args['ir_filters'] + mosaic_args['optical_filters']
# auto_script.drizzle_overlaps(root,
# filters=all_filters,
# min_nexp=1, pixfrac=mosaic_pixfrac,
# make_combined=True,
# ref_image=wcs_ref_file,
# drizzle_filters=False)
# IR filters
# if 'fix_stars' in visit_prep_args:
# fix_stars = visit_prep_args['fix_stars']
# else:
# fix_stars = False
drizzle_overlaps(root, filters=mosaic_args['ir_filters'], min_nexp=1,
pixfrac=mosaic_pixfrac,
make_combined=False,
ref_image=wcs_ref_file, include_saturated=fix_stars,
multi_driz_cr=(mosaic_driz_cr_type & 1) > 0,
filter_driz_cr=(mosaic_driz_cr_type & 2) > 0,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={'ir': IR_M_FILTERS+IR_W_FILTERS})
# Mask diffraction spikes
ir_mosaics = glob.glob('{0}-f*drz_sci.fits'.format(root))
if (len(ir_mosaics) > 0) & (mask_spikes):
cat = prep.make_SEP_catalog('{0}-ir'.format(root), threshold=4,
save_fits=False,
column_case=str.lower)
selection = (cat['mag_auto'] < 18) & (cat['flux_radius'] < 4.5)
selection |= (cat['mag_auto'] < 15.2) & (cat['flux_radius'] < 20)
# Bright GAIA stars to catch things with bad photometry
if True:
print('## Include GAIA stars in spike mask')
ra_center = np.median(cat['ra'])
dec_center = np.median(cat['dec'])
rad_arcmin = np.sqrt((cat['ra']-ra_center)**2*np.cos(cat['dec']/180*np.pi)**2+(cat['dec']-dec_center)**2)*60
try:
gaia_tmp = prep.get_gaia_DR2_catalog(ra_center, dec_center,
radius=rad_arcmin.max()*1.1, use_mirror=False)
idx, dr = utils.GTable(gaia_tmp).match_to_catalog_sky(cat)
gaia_match = (dr.value < 0.5)
gaia_match &= (gaia_tmp['phot_g_mean_mag'][idx] < 20)
gaia_match &= (cat['mag_auto'] < 17.5)
selection |= gaia_match
except:
print('## Include GAIA stars in spike mask - failed')
pass
# Note: very bright stars could still be saturated and the spikes
# might not be big enough given their catalog mag
msg = '\n### mask_spikes: {0} stars\n\n'.format(selection.sum())
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
if selection.sum() > 0:
for visit in visits:
filt = visit['product'].split('-')[-1]
if filt[:2] in ['f0', 'f1']:
mask_IR_psf_spikes(visit=visit, selection=selection,
cat=cat, minR=8, dy=5)
# Remake mosaics
drizzle_overlaps(root, filters=mosaic_args['ir_filters'],
min_nexp=1,
pixfrac=mosaic_pixfrac,
make_combined=False,
ref_image=wcs_ref_file,
include_saturated=fix_stars,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={'ir': IR_M_FILTERS+IR_W_FILTERS})
# More IR filter combinations for mosaics
if False:
extra_combinations = {'h': ['F140W', 'F160W'],
'yj': ['F098M', 'F105W', 'F110W', 'F125W']}
make_filter_combinations(root, weight_fnu=True, min_count=2,
filter_combinations=extra_combinations)
# Optical filters
mosaics = glob.glob('{0}-ir_dr?_sci.fits'.format(root))
if (mosaic_args['half_optical_pixscale']): # & (len(mosaics) > 0):
# Drizzle optical images to half the pixel scale determined for
# the IR mosaics. The optical mosaics can be 2x2 block averaged
# to match the IR images.
ref = pyfits.open('{0}_wcs-ref.fits'.format(root))
try:
h = ref[1].header.copy()
_ = h['CRPIX1']
except:
h = ref[0].header.copy()
for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
h[k] *= 2
h['CRPIX1'] -= 0.5
h['CRPIX2'] -= 0.5
for k in ['CD1_1', 'CD2_2']:
h[k] /= 2
wcs_ref_optical = '{0}-opt_wcs-ref.fits'.format(root)
data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)
pyfits.writeto(wcs_ref_optical, header=h, data=data, overwrite=True)
else:
wcs_ref_optical = wcs_ref_file
if len(mosaics) == 0:
# Call a single combined mosaic "ir" for detection catalogs, etc.
make_combined_label = 'ir'
else:
# Make a separate optical combined image
make_combined_label = 'opt'
drizzle_overlaps(root, filters=mosaic_args['optical_filters'],
pixfrac=mosaic_pixfrac, make_combined=False,
ref_image=wcs_ref_optical,
min_nexp=1+skip_single_optical_visits*1,
multi_driz_cr=(mosaic_driz_cr_type & 4) > 0,
filter_driz_cr=(mosaic_driz_cr_type & 8) > 0,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={make_combined_label: OPT_M_FILTERS+OPT_W_FILTERS})
# Fill IR filter mosaics with scaled combined data so they can be used
# as grism reference
fill_mosaics = mosaic_args['fill_mosaics']
if fill_mosaics:
if fill_mosaics == 'grism':
# Only fill mosaics if grism filters exist
has_grism = utils.column_string_operation(info['FILTER'],
['G141', 'G102', 'G800L'],
'count', 'or').sum() > 0
if has_grism:
fill_filter_mosaics(root)
else:
fill_filter_mosaics(root)
# Remove the WCS reference files
for file in [wcs_ref_optical, wcs_ref_file]:
if os.path.exists(file):
os.remove(file)
def make_mosaic_footprints(field_root):
"""
Make a ds9 region file marking the footprints where the drizzled weight (wht) images are nonzero
"""
import matplotlib.pyplot as plt
files = glob.glob('{0}-f*dr?_wht.fits'.format(field_root))
files.sort()
fp = open('{0}_mosaic.reg'.format(field_root), 'w')
fp.write('fk5\n')
fp.close()
for weight_image in files:
filt = weight_image.split('_dr')[0].split('-')[-1]
wave = filt[1:4]
if wave[0] in '01':
w = float(wave)*10
else:
w = float(wave)
wint = np.clip(np.interp(np.log10(w/800), [-0.3, 0.3], [0, 1]), 0, 1)
print(filt, w, wint)
clr = utils.RGBtoHex(plt.cm.Spectral_r(wint))
#plt.scatter([0],[0], color=clr, label=filt)
reg = prep.drizzle_footprint(weight_image, shrink=10, ext=0, outfile=None, label=filt) + ' color={0}\n'.format(clr)
fp = open('{0}_mosaic.reg'.format(field_root), 'a')
fp.write(reg)
fp.close()
def fill_filter_mosaics(field_root):
"""
Fill field mosaics with the average value taken from other filters so that all images have the same coverage
Parameters
----------
field_root : str
Field rootname used to find the '{field_root}-ir' / '{field_root}-opt' and individual filter mosaics
"""
import glob
import os
import scipy.ndimage as nd
import astropy.io.fits as pyfits
mosaic_files = glob.glob('{0}-ir_dr?_sci.fits'.format(field_root))
mosaic_files += glob.glob('{0}-opt_dr?_sci.fits'.format(field_root))
if len(mosaic_files) == 0:
return False
ir = pyfits.open(mosaic_files[0])
filter_files = glob.glob('{0}-f[01]*sci.fits'.format(field_root))
# If not IR filters, try optical
if len(filter_files) == 0:
filter_files = glob.glob('{0}-f[5-8]*sci.fits'.format(field_root))
for file in filter_files:
print(file)
sci = pyfits.open(file, mode='update')
wht = pyfits.open(file.replace('sci', 'wht'))
mask = wht[0].data == 0
scale = ir[0].header['PHOTFLAM']/sci[0].header['PHOTFLAM']
sci[0].data[mask] = ir[0].data[mask]*scale
sci.flush()
# Fill empty parts of IR mosaic with optical if both available
if len(mosaic_files) == 2:
print('Fill -ir- mosaic with -opt-')
ir_sci = pyfits.open(mosaic_files[0], mode='update')
ir_wht = pyfits.open(mosaic_files[0].replace('sci', 'wht'),
mode='update')
opt_sci = pyfits.open(mosaic_files[1])
opt_wht = pyfits.open(mosaic_files[1].replace('sci', 'wht'))
opt_sci_data = opt_sci[0].data
opt_wht_data = opt_wht[0].data
if opt_sci_data.shape[0] == 2*ir_wht[0].data.shape[0]:
# Half pixel scale
kern = np.ones((2, 2))
num = nd.convolve(opt_sci_data*opt_wht_data, kern)[::2, ::2]
den = nd.convolve(opt_wht_data, kern)[::2, ::2]
opt_sci_data = num/den
opt_sci_data[den <= 0] = 0
opt_wht_data = den
mask = ir_wht[0].data == 0
scale = opt_sci[0].header['PHOTFLAM']/ir_sci[0].header['PHOTFLAM']
ir_sci[0].data[mask] = opt_sci_data[mask]*scale
ir_wht[0].data[mask] = opt_wht_data[mask]/scale**2
ir_sci.flush()
ir_wht.flush()
return True
######################
# Objective function for catalog shifts
def _objfun_align(p0, tab, ref_tab, ref_err, shift_only, ret):
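"""
Objective function for catalog alignment shifts.
p0 : flattened, scaled alignment parameters, one row per catalog in `tab`
(2 values for shift only, 3 for shift+rotation, 4 for shift+rotation+scale).
tab : dict of visit catalogs / WCS built in `fine_alignment`.
ref_tab : reference astrometric catalog table.
ref_err : assumed positional uncertainty of the reference catalog, arcsec.
shift_only : kept for the call signature; the fit mode is inferred from the
number of parameters per catalog.
ret : 'field' plots the transformed catalogs; 'plot' and 'plotx' add
diagnostic scatter plots of the residuals; 'huber' returns the Huber loss
(minimized); 'student' returns the summed Student-t log probability;
anything else returns the summed normal log probability.
"""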
#from grizli.utils import transform_wcs
from scipy.special import huber
from scipy.stats import t as student
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
from ..utils import transform_wcs
N = len(tab)
trans = np.reshape(p0, (N, -1)) # /10.
#trans[0,:] = [0,0,0,1]
sh = trans.shape
if sh[1] == 2:
# Shift only
pscl = np.array([10., 10.])
trans = np.hstack([trans/pscl, np.zeros((N, 1)), np.ones((N, 1))])
elif sh[1] == 3:
# Shift + rot
pscl = np.array([10., 10., 100.])
trans = np.hstack([trans/pscl, np.ones((N, 1))])
elif sh[1] == 4:
# Shift + rot + scale
pscl = np.array([10., 10., 100., 100])
trans = trans/pscl
print(trans)
#N = trans.shape[0]
trans_wcs = {}
trans_rd = {}
for ix, i in enumerate(tab):
if (ref_err > 0.1) & (ix == 0):
trans_wcs[i] = transform_wcs(tab[i]['wcs'], translation=[0, 0], rotation=0., scale=1.)
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
else:
trans_wcs[i] = transform_wcs(tab[i]['wcs'], translation=list(trans[ix, :2]), rotation=trans[ix, 2]/180*np.pi, scale=trans[ix, 3])
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
# Cosine declination factor
cosd = np.cos(np.median(trans_rd[i][:, 1]/180*np.pi))
if ret == 'field':
for ix, i in enumerate(tab):
print(tab[i]['wcs'])
plt.scatter(trans_rd[i][:, 0], trans_rd[i][:, 1], alpha=0.8, marker='x')
continue
for m in tab[i]['match_idx']:
ix, jx = tab[i]['match_idx'][m]
if m < 0:
continue
else:
# continue
dx_i = (trans_rd[i][ix, 0] - trans_rd[m][jx, 0])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - trans_rd[m][jx, 1])*3600.
for j in range(len(ix)):
if j == 0:
p = plt.plot(trans_rd[i][j, 0]+np.array([0, dx_i[j]/60.]), trans_rd[i][j, 1]+np.array([0, dy_i[j]/60.]), alpha=0.8)
c = p[0].get_color()
else:
p = plt.plot(trans_rd[i][j, 0]+np.array([0, dx_i[j]/60.]), trans_rd[i][j, 1]+np.array([0, dy_i[j]/60.]), alpha=0.8, color=c)
return True
trans_wcs = {}
trans_rd = {}
for ix, i in enumerate(tab):
trans_wcs[i] = transform_wcs(tab[i]['wcs'],
translation=list(trans[ix, :2]),
rotation=trans[ix, 2]/180*np.pi,
scale=trans[ix, 3])
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
dx, dy = [], []
for i in tab:
mcount = 0
for m in tab[i]['match_idx']:
ix, jx = tab[i]['match_idx'][m]
if m < 0:
continue
else:
# continue
dx_i = (trans_rd[i][ix, 0] - trans_rd[m][jx, 0])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - trans_rd[m][jx, 1])*3600.
mcount += len(dx_i)
dx.append(dx_i/0.01)
dy.append(dy_i/0.01)
if ret == 'plot':
plt.gca().scatter(dx_i, dy_i, marker='.', alpha=0.1)
# Reference sources
if -1 in tab[i]['match_idx']:
m = -1
ix, jx = tab[i]['match_idx'][m]
dx_i = (trans_rd[i][ix, 0] - tab[i]['ref_tab']['ra'][jx])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - tab[i]['ref_tab']['dec'][jx])*3600.
rcount = len(dx_i)
mcount = np.maximum(mcount, 1)
rcount = np.maximum(rcount, 1)
dx.append(dx_i/(ref_err/np.clip(mcount/rcount, 1, 1000)))
dy.append(dy_i/(ref_err/np.clip(mcount/rcount, 1, 1000)))
if ret.startswith('plotx') & (ref_err < 0.1):
plt.gca().scatter(dx_i, dy_i, marker='+', color='k', alpha=0.3, zorder=1000)
# Residuals
dr = np.sqrt(np.hstack(dx)**2+np.hstack(dy)**2)
if ret == 'huber': # Minimize Huber loss function
loss = huber(1, dr).sum()*2
return loss
elif ret == 'student': # student-t log prob (maximize)
df = 2.5 # more power in wings than normal
lnp = student.logpdf(dr, df, loc=0, scale=1).sum()
return lnp
else: # Normal log prob (maximize)
lnp = norm.logpdf(dr, loc=0, scale=1).sum()
return lnp
def get_rgb_filters(filter_list, force_ir=False, pure_sort=False):
"""
Compute which filters to use to make an RGB cutout
Parameters
----------
filter_list : list
All available filters
force_ir : bool
Only use IR filters.
pure_sort : bool
Don't apply the preference for red (IR) filters; just sort by wavelength.
Returns
-------
rgb_filt : [r, g, b]
List of filters to use
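Examples
--------
Illustrative call; with these inputs the preferred combination
['f160w', 'f125w', 'f814w'] is returned directly:
>>> get_rgb_filters(['f160w', 'f125w', 'f814w'])  # doctest: +SKIP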
"""
from collections import OrderedDict
# Sort by wavelength
for_sort = OrderedDict()
use_filters = []
ir_filters = []
# Preferred combinations
filter_list_lower = [f.lower() for f in filter_list]
rpref = ['h', 'f160w', 'f140w']
gpref = ['j', 'yj', 'f125w', 'f110w', 'f105w', 'f098m']
bpref = ['opt', 'visr', 'visb', 'f814w', 'f814wu', 'f606w', 'f606wu', 'f775w', 'f850lp', 'f435w']
pref_list = [None, None, None]
has_pref = 0
for i, pref in enumerate([rpref, gpref, bpref]):
for f in pref:
if f in filter_list_lower:
pref_list[i] = f
has_pref += 1
break
if has_pref == 3:
print('Use preferred r/g/b combination: {0}'.format(pref_list))
return pref_list
for f in filter_list:
if f == 'ir':
continue
elif f == 'opt':
continue
if f == 'uv':
val = 'f0300'
elif f == 'visb':
val = 'f0435'
elif f == 'visr':
val = 'f0814'
elif f == 'y':
val = 'f1000'
elif f == 'yj':
val = 'f1100'
elif f == 'j':
val = 'f1250'
elif f == 'h':
val = 'f1500'
elif f[1] in '01':
val = f[:4]+'0'
else:
val = 'f0'+f[1:4]
# Red filters (>6000)
if val > 'f07':
if (val >= 'f09') & (force_ir):
ir_filters.append(f)
use_filters.append(f)
for_sort[f] = val
pop_indices = []
joined = {'uv': '23', 'visb': '45', 'visr': '678',
'y': ['f098m', 'f105w'],
'j': ['f110w', 'f125w'],
'h': ['f140w', 'f160w']}
for j in joined:
if j in use_filters:
indices = []
for f in use_filters:
if f in joined:
continue
if j in 'yjh':
if f in joined[j]:
indices.append(use_filters.index(f))
else:
if f[1] in joined[j]:
indices.append(use_filters.index(f))
if len(indices) == len(use_filters)-1:
# All filters are in a given group so pop the group
pop_indices.append(use_filters.index(j))
else:
pop_indices.extend(indices)
pop_indices.sort()
for i in pop_indices[::-1]:
filt_i = use_filters.pop(i)
for_sort.pop(filt_i)
# Only one filter
if len(use_filters) == 1:
f = use_filters[0]
return [f, f, f]
if len(filter_list) == 1:
f = filter_list[0]
return [f, f, f]
if (len(use_filters) == 0) & (len(filter_list) > 0):
so = np.argsort(filter_list)
f = filter_list[so[-1]]
return [f, f, f]
# Preference for red filters
if (len(ir_filters) >= 3) & (not pure_sort):
use_filters = ir_filters
for k in list(for_sort.keys()):
if k not in ir_filters:
p = for_sort.pop(k)
so = np.argsort(list(for_sort.values()))
waves = np.cast[float]([for_sort[f][1:] for f in for_sort])
# Reddest
rfilt = use_filters[so[-1]]
# Bluest
bfilt = use_filters[so[0]]
if len(use_filters) == 2:
return [rfilt, 'sum', bfilt]
elif len(use_filters) == 3:
gfilt = use_filters[so[1]]
return [rfilt, gfilt, bfilt]
else:
# Closest to average wavelength
mean = np.mean([waves.max(), waves.min()])
ix_g = np.argmin(np.abs(waves-mean))
gfilt = use_filters[ix_g]
return [rfilt, gfilt, bfilt]
TICKPARAMS = dict(axis='both', colors='w', which='both')
def field_rgb(root='j010514+021532', xsize=6, output_dpi=None, HOME_PATH='./', show_ir=True, pl=1, pf=1, scl=1, scale_ab=None, rgb_scl=[1, 1, 1], ds9=None, force_ir=False, filters=None, add_labels=True, output_format='jpg', rgb_min=-0.01, xyslice=None, pure_sort=False, verbose=True, force_rgb=None, suffix='.field', mask_empty=False, tick_interval=60, timestamp=False, mw_ebv=0, use_background=False, tickparams=TICKPARAMS, fill_black=False, ref_spectrum=None, gzext='', full_dimensions=False, invert=False):
"""
RGB image of the field mosaics
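Examples
--------
Illustrative call, run from a directory containing the drizzled
'{root}-*_dr?_sci.fits' mosaics:
>>> _ = field_rgb(root='j010514+021532', HOME_PATH=None, xsize=6, show_ir=False, scale_ab=None)  # doctest: +SKIP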
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
#import montage_wrapper
from astropy.visualization import make_lupton_rgb
try:
from .. import utils
except:
from grizli import utils
if HOME_PATH is not None:
phot_file = '{0}/{1}/Prep/{1}_phot.fits'.format(HOME_PATH, root)
if not os.path.exists(phot_file):
print('Photometry file {0} not found.'.format(phot_file))
return False
phot = utils.GTable.gread(phot_file)
sci_files = glob.glob('{0}/{1}/Prep/{1}-[ofuvyjh]*sci.fits{2}'.format(HOME_PATH, root, gzext))
PATH_TO = '{0}/{1}/Prep'.format(HOME_PATH, root)
else:
PATH_TO = './'
sci_files = glob.glob('./{1}-[fuvyjho]*sci.fits{2}'.format(PATH_TO, root, gzext))
print('PATH: {0}, files:{1}'.format(PATH_TO, sci_files))
if filters is None:
filters = [file.split('_')[-3].split('-')[-1] for file in sci_files]
if show_ir:
filters += ['ir']
#mag_auto = 23.9-2.5*np.log10(phot['flux_auto'])
ims = {}
for f in filters:
try:
img = glob.glob('{0}/{1}-{2}_dr?_sci.fits{3}'.format(PATH_TO, root, f, gzext))[0]
except:
print('Failed: {0}/{1}-{2}_dr?_sci.fits{3}'.format(PATH_TO, root, f, gzext))
try:
ims[f] = pyfits.open(img)
if 'IMGMED' in ims[f][0].header:
imgmed = ims[f][0].header['IMGMED']
ims[f][0].data -= imgmed
else:
imgmed = 0
bkg_file = img.split('_dr')[0]+'_bkg.fits'
if use_background & os.path.exists(bkg_file):
print('Subtract background: '+bkg_file)
bkg = pyfits.open(bkg_file)
ims[f][0].data -= bkg[0].data - imgmed
except:
continue
filters = list(ims.keys())
wcs = pywcs.WCS(ims[filters[-1]][0].header)
pscale = utils.get_wcs_pscale(wcs)
minor = MultipleLocator(tick_interval/pscale)
if force_rgb is None:
rf, gf, bf = get_rgb_filters(filters, force_ir=force_ir, pure_sort=pure_sort)
else:
rf, gf, bf = force_rgb
logstr = '# field_rgb {0}: r {1} / g {2} / b {3}'.format(root, rf, gf, bf)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
#pf = 1
#pl = 1
if scale_ab is not None:
zp_r = utils.calc_header_zeropoint(ims[rf], ext=0)
scl = 10**(-0.4*(zp_r-5-scale_ab))
scl *= (0.06/pscale)**2
if mw_ebv > 0:
MW_F99 = utils.MW_F99(mw_ebv*utils.MW_RV, r_v=utils.MW_RV)
else:
MW_F99 = None
rimg = ims[rf][0].data * (ims[rf][0].header['PHOTFLAM']/5.e-20)**pf * (ims[rf][0].header['PHOTPLAM']/1.e4)**pl*scl*rgb_scl[0]
if MW_F99 is not None:
rmw = 10**(0.4*(MW_F99(np.array([ims[rf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, rf, rmw))
rimg *= rmw
if bf == 'sum':
bimg = rimg
else:
bimg = ims[bf][0].data * (ims[bf][0].header['PHOTFLAM']/5.e-20)**pf * (ims[bf][0].header['PHOTPLAM']/1.e4)**pl*scl*rgb_scl[2]
if MW_F99 is not None:
bmw = 10**(0.4*(MW_F99(np.array([ims[bf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, bf, bmw))
bimg *= bmw
# Double-acs
if bimg.shape != rimg.shape:
import scipy.ndimage as nd
kern = np.ones((2, 2))
bimg = nd.convolve(bimg, kern)[::2, ::2]
if gf == 'sum':
gimg = (rimg+bimg)/2.
else:
gscl = (ims[gf][0].header['PHOTFLAM']/5.e-20)**pf
gscl *= (ims[gf][0].header['PHOTPLAM']/1.e4)**pl
gimg = ims[gf][0].data * gscl * scl * rgb_scl[1] # * 1.5
if MW_F99 is not None:
gmw = 10**(0.4*(MW_F99(np.array([ims[gf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, gf, gmw))
gimg *= gmw
rmsk = rimg == 0
gmsk = gimg == 0
bmsk = bimg == 0
if gimg.shape != rimg.shape:
import scipy.ndimage as nd
kern = np.ones((2, 2))
gimg = nd.convolve(gimg, kern)[::2, ::2]
gmsk = gmsk[::2,::2]
# Scale by reference synphot spectrum
if ref_spectrum is not None:
import pysynphot as S
try:
_obsm = [utils.get_filter_obsmode(filter=_f) for _f in [rf, gf, bf]]
_bp = [S.ObsBandpass(_m) for _m in _obsm]
_bpf = [ref_spectrum.integrate_filter(_b)/_b.pivot()**2 for _b in _bp]
gimg *= _bpf[0]/_bpf[1]
bimg *= _bpf[0]/_bpf[2]
print('ref_spectrum supplied: {0}*{1:.2f} {2}*{3:.2f}'.format(gf, _bpf[0]/_bpf[1], bf, _bpf[0]/_bpf[2]))
except:
pass
if mask_empty:
mask = rmsk | gmsk | bmsk
print('Mask empty pixels in any channel: {0}'.format(mask.sum()))
rimg[mask] = 0
gimg[mask] = 0
bimg[mask] = 0
if ds9:
ds9.set('rgb')
ds9.set('rgb channel red')
wcs_header = utils.to_header(pywcs.WCS(ims[rf][0].header))
ds9.view(rimg, header=wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel green')
ds9.view(gimg, wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel blue')
ds9.view(bimg, wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel red')
ds9.set('rgb lock colorbar')
return False
xsl = ysl = None
if show_ir & (not full_dimensions):
# Show only area where IR is available
yp, xp = np.indices(ims[rf][0].data.shape)
wht = pyfits.open(ims[rf].filename().replace('_sci', '_wht'))
mask = wht[0].data > 0
xsl = slice(xp[mask].min(), xp[mask].max())
ysl = slice(yp[mask].min(), yp[mask].max())
rimg = rimg[ysl, xsl]
bimg = bimg[ysl, xsl]
gimg = gimg[ysl, xsl]
if fill_black:
rmsk = rmsk[ysl, xsl]
gmsk = gmsk[ysl, xsl]
bmsk = bmsk[ysl, xsl]
else:
if xyslice is not None:
xsl, ysl = xyslice
rimg = rimg[ysl, xsl]
bimg = bimg[ysl, xsl]
gimg = gimg[ysl, xsl]
if fill_black:
rmsk = rmsk[ysl, xsl]
gmsk = gmsk[ysl, xsl]
bmsk = bmsk[ysl, xsl]
image = make_lupton_rgb(rimg, gimg, bimg, stretch=0.1, minimum=rgb_min)
if invert:
image = 255-image
if fill_black:
image[rmsk,0] = 0
image[gmsk,1] = 0
image[bmsk,2] = 0
sh = image.shape
ny, nx, _ = sh
if full_dimensions:
dpi = int(nx/xsize)
xsize = nx/dpi
print('xsize: ', xsize, ny, nx, dpi)
elif (output_dpi is not None):
xsize = nx/output_dpi
dim = [xsize, xsize/nx*ny]
fig, ax = plt.subplots(1,1,figsize=dim)
ax.imshow(image, origin='lower', extent=(-nx/2, nx/2, -ny/2, ny/2))
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.xaxis.set_major_locator(minor)
ax.yaxis.set_major_locator(minor)
#ax.tick_params(axis='x', colors='w', which='both')
#ax.tick_params(axis='y', colors='w', which='both')
if tickparams:
ax.tick_params(**tickparams)
if add_labels:
ax.text(0.03, 0.97, root, bbox=dict(facecolor='w', alpha=0.8), size=10, ha='left', va='top', transform=ax.transAxes)
ax.text(0.06+0.08*2, 0.02, rf, color='r', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
ax.text(0.06+0.08, 0.02, gf, color='g', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
ax.text(0.06, 0.02, bf, color='b', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
if timestamp:
fig.text(0.97, 0.03, time.ctime(), ha='right', va='bottom', fontsize=5, transform=fig.transFigure, color='w')
if full_dimensions:
ax.axis('off')
fig.tight_layout(pad=0)
dpi = int(nx/xsize/full_dimensions)
fig.savefig('{0}{1}.{2}'.format(root, suffix, output_format), dpi=dpi)
else:
fig.tight_layout(pad=0.1)
fig.savefig('{0}{1}.{2}'.format(root, suffix, output_format))
return xsl, ysl, (rf, gf, bf), fig
#########
THUMB_RGB_PARAMS = {'xsize': 4,
'output_dpi': None,
'rgb_min': -0.01,
'add_labels': False,
'output_format': 'png',
'show_ir': False,
'scl': 2,
'suffix': '.rgb',
'mask_empty': False,
'tick_interval': 1,
'pl': 1, # 1 for f_lambda, 2 for f_nu
}
DRIZZLER_ARGS = {'aws_bucket': False,
'scale_ab': 21.5,
'subtract_median': False,
'theta': 0.,
'pixscale': 0.1,
'pixfrac': 0.33,
'kernel': 'square',
'half_optical_pixscale': True,
'filters': ['f160w', 'f814w', 'f140w', 'f125w', 'f105w',
'f110w', 'f098m', 'f850lp', 'f775w', 'f606w',
'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
'size': 3,
'thumb_height': 1.5,
'rgb_params': THUMB_RGB_PARAMS,
'remove': False,
'include_ir_psf': True,
'combine_similar_filters': False,
'single_output': True}
def make_rgb_thumbnails(root='j140814+565638', ids=None, maglim=21,
drizzler_args=DRIZZLER_ARGS, use_line_wcs=False,
remove_fits=False, skip=True, min_filters=2,
auto_size=False, size_limits=[4, 15], mag=None,
make_segmentation_figure=True):
"""
Make RGB thumbnails in the working directory
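Examples
--------
Illustrative call, assuming a working directory alongside ../Prep (for the
photometric catalog) and ../Extractions:
>>> make_rgb_thumbnails(root='j140814+565638', ids='extracted', maglim=21)  # doctest: +SKIP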
"""
import matplotlib.pyplot as plt
import astropy.wcs as pywcs
from grizli.aws import aws_drizzler
phot_cat = glob.glob('../Prep/{0}_phot.fits'.format(root))[0]
cat = utils.read_catalog(phot_cat)
if make_segmentation_figure:
plt.ioff()
seg_files = glob.glob('../*/{0}*seg.fits*'.format(root))
if len(seg_files) == 0:
make_segmentation_figure = False
else:
seg = pyfits.open(seg_files[0])
seg_data = seg[0].data
seg_wcs = pywcs.WCS(seg[0].header)
# Randomize seg to get dispersion between neighboring objects
np.random.seed(hash(root) % (10 ** 8))
rnd_ids = np.append([0], np.argsort(np.random.rand(len(cat)))+1)
#rnd_seg = rnd_ids[seg[0].data]
#phot_xy = seg_wcs.all_world2pix(cat['ra'], cat['dec'], 0)
# Count filters
num_filters = 0
for k in cat.meta:
if k.startswith('F') & k.endswith('uJy2dn'):
num_filters += 1
if min_filters > num_filters:
print('# make_rgb_thumbnails: only {0} filters found'.format(num_filters))
return False
if mag is None:
auto_mag = 23.9-2.5*np.log10(cat['flux_auto']*cat['tot_corr'])
# More like surface brightness
try:
mag = 23.9-2.5*np.log10(cat['flux_aper_2'])
mag[~np.isfinite(mag)] = auto_mag[~np.isfinite(mag)]
except:
mag = auto_mag
pixel_scale = cat.meta['ASEC_0']/cat.meta['APER_0']
sx = (cat['xmax']-cat['xmin'])*pixel_scale
sy = (cat['ymax']-cat['ymin'])*pixel_scale
#lim_mag = 23.9-2.5*np.log10(200*np.percentile(cat['fluxerr_aper_4'], 50))
#print('limiting mag: ', lim_mag)
lim_mag = 22.8
extracted_ids = False
if ids is None:
ids = cat['id'][mag < maglim]
elif ids == 'extracted':
extracted_ids = True
# Make thumbnails for extracted objects
beams_files = glob.glob('../Extractions/*beams.fits')
if len(beams_files) == 0:
return False
beams_files.sort()
ids = [int(os.path.basename(file).split('_')[-1].split('.beams')[0]) for file in beams_files]
for id_column in ['id', 'number']:
if id_column in cat.colnames:
break
args = drizzler_args.copy()
N = len(ids)
for i, id in enumerate(ids):
ix = cat[id_column] == id
label = '{0}_{1:05d}'.format(root, id)
thumb_files = glob.glob('../*/{0}.thumb.png'.format(label))
if (skip) & (len(thumb_files) > 0):
print('\n##\n## RGB thumbnail {0} ({1}/{2})\n##'.format(label, i+1, N))
continue
args['scale_ab'] = np.clip(mag[ix][0]-1, 17, lim_mag)
# Use drizzled line image for WCS?
if use_line_wcs:
line_file = glob.glob('../Extractions/{0}.full.fits'.format(label))
# Reset
if 'wcs' in args:
args.pop('wcs')
for k in ['pixfrac', 'kernel']:
if k in drizzler_args:
args[k] = drizzler_args[k]
# Find line extension
msg = '\n# Use WCS from {0}[{1},{2}] (pixfrac={3:.2f}, kernel={4})'
if len(line_file) > 0:
full = pyfits.open(line_file[0])
for ext in full:
if 'EXTNAME' in ext.header:
if ext.header['EXTNAME'] == 'LINE':
try:
wcs = pywcs.WCS(ext.header)
args['wcs'] = wcs
args['pixfrac'] = ext.header['PIXFRAC']
args['kernel'] = ext.header['DRIZKRNL']
print(msg.format(line_file[0],
ext.header['EXTNAME'],
ext.header['EXTVER'], args['pixfrac'],
args['kernel']))
except:
pass
break
if (auto_size) & ('wcs' not in args):
s_i = np.maximum(sx[ix][0], sy[ix][0])
args['size'] = np.ceil(np.clip(s_i,
size_limits[0], size_limits[1]))
print('\n##\n## RGB thumbnail {0} *size={3}* ({1}/{2})\n##'.format(label, i+1, N, args['size']))
else:
print('\n##\n## RGB thumbnail {0} ({1}/{2})\n##'.format(label, i+1, N))
aws_drizzler.drizzle_images(label=label,
ra=cat['ra'][ix][0], dec=cat['dec'][ix][0],
master='local', single_output=True,
make_segmentation_figure=False, **args)
files = glob.glob('{0}.thumb.fits'.format(label))
blot_seg = None
if (make_segmentation_figure) & (len(files) > 0):
th = pyfits.open(files[0], mode='update')
th_wcs = pywcs.WCS(th[0].header)
blot_seg = utils.blot_nearest_exact(seg_data, seg_wcs, th_wcs,
stepsize=-1, scale_by_pixel_area=False)
rnd_seg = rnd_ids[np.cast[int](blot_seg)]*1.
th_ids = np.unique(blot_seg)
sh = th[0].data.shape
yp, xp = np.indices(sh)
thumb_height = 2.
fig = plt.figure(figsize=[thumb_height*sh[1]/sh[0], thumb_height])
ax = fig.add_subplot(111)
rnd_seg[rnd_seg == 0] = np.nan
ax.imshow(rnd_seg, aspect='equal', cmap='terrain_r',
vmin=-0.05*len(cat), vmax=1.05*len(cat))
ax.set_xticklabels([])
ax.set_yticklabels([])
ix = utils.column_values_in_list(cat['number'], th_ids)
xc, yc = th_wcs.all_world2pix(cat['ra'][ix], cat['dec'][ix], 0)
xc = np.clip(xc, 0.09*sh[1], 0.91*sh[1])
yc = np.clip(yc, 0.08*sh[0], 0.92*sh[0])
for th_id, x_i, y_i in zip(cat['number'][ix], xc, yc):
if th_id == 0:
continue
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='w')
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='k', alpha=0.95)
ax.set_xlim(0, sh[1]-1)
ax.set_ylim(0, sh[0]-1)
ax.set_axis_off()
fig.tight_layout(pad=0.01)
fig.savefig('{0}.seg.png'.format(label))
plt.close(fig)
# Append to thumbs file
seg_hdu = pyfits.ImageHDU(data=np.cast[int](blot_seg), name='SEG')
th.append(seg_hdu)
th.writeto('{0}.thumb.fits'.format(label), overwrite=True,
output_verify='fix')
th.close()
if remove_fits > 0:
files = glob.glob('{0}*_dr[cz]*fits'.format(label))
for file in files:
os.remove(file)
def field_psf(root='j020924-044344', PREP_PATH='../Prep', RAW_PATH='../RAW', EXTRACT_PATH='../Extractions', factors=[1, 2, 4], get_drizzle_scale=True, subsample=256, size=6, get_line_maps=False, raise_fault=False, verbose=True, psf_filters=['F098M', 'F110W', 'F105W', 'F125W', 'F140W', 'F160W'], skip=False, make_fits=True, **kwargs):
"""
Generate PSFs for the available filters in a given field
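Examples
--------
Illustrative call (note that the function changes to `PREP_PATH`):
>>> hdus = field_psf(root='j020924-044344', psf_filters=['F140W', 'F160W'], make_fits=True)  # doctest: +SKIP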
"""
import os
import glob
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from .. import utils
from ..galfit import psf as gpsf
except:
from grizli import utils
from grizli.galfit import psf as gpsf
os.chdir(PREP_PATH)
drz_str = '{0}-ir_dr?_sci.fits'.format(root)
drz_file = glob.glob(drz_str)
if len(drz_file) == 0:
err = f'Reference file {drz_str} not found.'
if raise_fault:
raise FileNotFoundError(err)
else:
print(err)
return False
else:
drz_file = drz_file[0]
scale = []
pixfrac = []
kernel = []
labels = []
# For the line maps
if get_line_maps:
args_file = os.path.join(EXTRACT_PATH, f'{root}_fit_args.npy')
if not os.path.exists(args_file):
err = 'fit_args.npy not found.'
if raise_fault:
raise FileNotFoundError(err)
else:
print(err)
return False
default = DITHERED_PLINE
# Parameters of the line maps
args = np.load(args_file, allow_pickle=True)[0]
# Line images
pline = args['pline']
for factor in factors:
if 'pixscale' in pline:
scale.append(pline['pixscale']/factor)
else:
scale.append(default['pixscale']/factor)
if 'pixfrac' in pline:
pixfrac.append(pline['pixfrac'])
else:
pixfrac.append(default['pixfrac'])
if 'kernel' in pline:
kernel.append(pline['kernel'])
else:
kernel.append(default['kernel'])
labels.append('LINE{0}'.format(factor))
# Mosaic
im = pyfits.open(drz_file)
drz_wcs = pywcs.WCS(im[0].header)
pscale = utils.get_wcs_pscale(drz_wcs)
sh = im[0].data.shape
if get_drizzle_scale:
rounded = int(np.round(im[0].header['D001SCAL']*1000))/1000.
for factor in factors:
scale.append(rounded/factor)
labels.append('DRIZ{0}'.format(factor))
kernel.append(im[0].header['D001KERN'])
pixfrac.append(im[0].header['D001PIXF'])
# FITS info
visits_file = '{0}_visits.npy'.format(root)
if not os.path.exists(visits_file):
parse_visits(field_root=root, RAW_PATH=RAW_PATH)
visits, groups, info = np.load(visits_file, allow_pickle=True)
# Append "U" to UVIS filters in info
if 'DETECTOR' in info.colnames:
uvis = np.where(info['DETECTOR'] == 'UVIS')[0]
filters = [f for f in info['FILTER']]
for i in uvis:
filters[i] += 'U'
info['FILTER'] = filters
# Average PSF
xp, yp = np.meshgrid(np.arange(0, sh[1], subsample),
np.arange(0, sh[0], subsample))
ra, dec = drz_wcs.all_pix2world(xp, yp, 0)
# Ref images
files = glob.glob('{0}-f[0-9]*sci.fits'.format(root))
if verbose:
print(' ')
hdus = []
for file in files:
filter = file.split(root+'-')[1].split('_')[0]
if filter.upper() not in psf_filters:
continue
if (os.path.exists('{0}-{1}_psf.fits'.format(root, filter))) & skip:
continue
flt_files = list(info['FILE'][info['FILTER'] == filter.upper()])
if len(flt_files) == 0:
# Try to use HDRTAB in drizzled image
flt_files = None
driz_image = file
else:
driz_image = drz_file
driz_hdu = pyfits.open(file)
GP = gpsf.DrizzlePSF(flt_files=flt_files, info=None,
driz_image=driz_image)
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
hdu[0].header['ROOT'] = root
for scl, pf, kern_i, label in zip(scale, pixfrac, kernel, labels):
ix = 0
psf_f = None
if pf == 0:
kern = 'point'
else:
kern = kern_i
logstr = '# psf {0} {5:6} / {1:.3f}" / pixf: {2} / {3:8} / {4}'
logstr = logstr.format(root, scl, pf, kern, filter, label)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
for ri, di in zip(ra.flatten(), dec.flatten()):
slice_h, wcs_slice = utils.make_wcsheader(ra=ri, dec=di,
size=size, pixscale=scl,
get_hdu=False, theta=0)
# Filters with extended profiles
irext = ['F098M', 'F110W', 'F105W', 'F125W', 'F140W', 'F160W']
get_extended = filter.upper() in irext
try:
psf_i = GP.get_psf(ra=ri, dec=di, filter=filter.upper(),
pixfrac=pf, kernel=kern, verbose=False,
wcs_slice=wcs_slice,
get_extended=get_extended,
get_weight=True)
except:
continue
msk_i = (psf_i[1].data != 0)
msk_i &= np.isfinite(psf_i[1].data)
if msk_i.sum() == 0:
continue
if ix == 0:
# Initialize
msk_f = msk_i*1
psf_f = psf_i
psf_f[1].data[msk_f == 0] = 0
ix += 1
else:
# Add to existing
msk_f += msk_i*1
psf_f[1].data[msk_i > 0] += psf_i[1].data[msk_i > 0]
ix += 1
if psf_f is None:
msg = 'PSF for {0} (filter={1}) is empty'
print(msg.format(file, filter))
continue
# Average
psf_f[1].data /= np.maximum(msk_f, 1)
psf_f[1].header['FILTER'] = filter, 'Filter'
psf_f[1].header['PSCALE'] = scl, 'Pixel scale, arcsec'
psf_f[1].header['PIXFRAC'] = pf, 'Pixfrac'
psf_f[1].header['KERNEL'] = kern, 'Kernel'
psf_f[1].header['EXTNAME'] = 'PSF'
psf_f[1].header['EXTVER'] = label
hdu.append(psf_f[1])
if make_fits:
psf_file = '{0}-{1}_psf.fits'.format(root, filter)
hdu.writeto(psf_file, overwrite=True)
hdus.append(hdu)
return hdus
def make_report(root, gzipped_links=True, xsize=18, output_dpi=None, make_rgb=True, mw_ebv=0):
"""
Make HTML report of the imaging and grism data products
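Examples
--------
Illustrative call, run from a sibling of the Prep directory (the function
changes to '../Prep/'):
>>> make_report('j010514+021532', gzipped_links=False, make_rgb=True)  # doctest: +SKIP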
"""
import glob
import matplotlib.pyplot as plt
import astropy.time
now = astropy.time.Time.now().iso
plt.ioff()
os.chdir('../Prep/')
bfilters = glob.glob('{0}-f[2-8]*sci.fits'.format(root))
bfilters.sort()
rfilters = glob.glob('{0}-f[01]*sci.fits'.format(root))
rfilters.sort()
filters = [f.split('-')[-1].split('_dr')[0] for f in bfilters + rfilters]
if len(filters) == 0:
has_mosaics = False
visits, groups, info = np.load('{0}_visits.npy'.format(root),
allow_pickle=True)
filters = np.unique([v['product'].split('-')[-1] for v in visits])
else:
has_mosaics = True
if make_rgb & has_mosaics:
field_rgb(root, HOME_PATH=None, xsize=xsize, output_dpi=output_dpi, ds9=None, scl=2, suffix='.rgb', timestamp=True, mw_ebv=mw_ebv)
for filter in filters:
field_rgb(root, HOME_PATH=None, xsize=18, ds9=None, scl=2, force_rgb=[filter, 'sum', 'sum'], suffix='.'+filter, timestamp=True)
##
## Mosaic table
##
rows = []
for filter in filters:
os.system('grep -e " 0 " -e "radec" *{0}*wcs.log > /tmp/{1}.log'.format(filter, root))
wcs_files = glob.glob('*{0}*wcs.log'.format(filter))
wcs = '<pre>'+''.join(open('/tmp/{0}.log'.format(root)).readlines())+'</pre>'
for file in wcs_files:
png_url = '<a href={1}>{0}</a>'.format(file, file.replace('.log', '.png').replace('+', '%2B'))
wcs = wcs.replace(file, png_url)
try:
im = pyfits.open(glob.glob('{0}-{1}*sci.fits'.format(root, filter))[0])
h = im[0].header
url = '<a href="./{0}">sci</a>'.format(im.filename())
url += ' '+url.replace('_sci', '_wht').replace('>sci', '>wht')
if gzipped_links:
url = url.replace('.fits', '.fits.gz')
psf_file = '{0}-{1}_psf.fits'.format(root, filter)
if os.path.exists(psf_file):
url += ' '+'<a href="./{0}">psf</a>'.format(psf_file)
row = [filter, url, '{0} {1}'.format(h['NAXIS1'], h['NAXIS2']), '{0:.5f} {1:.5f}'.format(h['CRVAL1'], h['CRVAL2']), h['EXPTIME'], h['NDRIZIM'], wcs, '<a href={0}.{1}.jpg><img src={0}.{1}.jpg height=200px></a>'.format(root, filter)]
except:
row = [filter, '--', '--', '--', 0., 0, wcs, '--']
rows.append(row)
tab = utils.GTable(rows=rows, names=['filter', 'FITS', 'naxis', 'crval', 'exptime', 'ndrizim', 'wcs_log', 'img'], dtype=[str, str, str, str, float, int, str, str])
tab['exptime'].format = '.1f'
tab.write_sortable_html('{0}.summary.html'.format(root), replace_braces=True, localhost=False, max_lines=500, table_id=None, table_class='display compact', css=None, filter_columns=[], buttons=['csv'], toggle=False, use_json=False)
## Grism figures
column_files = glob.glob('*column.png')
if len(column_files) > 0:
column_files.sort()
column_url = '<div>' + ' '.join(['<a href="./{0}"><img src="./{0}" height=100px title="{1}"></a>'.format(f.replace('+', '%2B'), f) for f in column_files]) + '</div>'
else:
column_url = ''
grism_files = glob.glob('../Extractions/*grism*fits*')
if len(grism_files) > 0:
grism_files.sort()
grism_pngs = glob.glob('../Extractions/*grism*png')
if len(grism_pngs) > 0:
grism_pngs.sort()
grism_url = '<div>' + ' '.join(['<a href="./{0}"><img src="./{0}" width=400px title="{1}"></a>'.format(f.replace('+', '%2B'), f) for f in grism_pngs]) + '</div>\n'
else:
grism_url = ''
grism_url += '<pre>'
grism_url += '\n'.join(['<a href="./{0}">{1}</a>'.format(f.replace('+', '%2B'), f) for f in grism_files])
grism_url += '\n <a href=../Extractions/{0}-fit.html> {0}-fit.html </a>'.format(root)
grism_url += '\n <a href="../Extractions/{0}_zhist.png"><img src="../Extractions/{0}_zhist.png" width=400px title="{0}_zhist.png"> </a>'.format(root)
grism_url += '\n</pre>'
if gzipped_links:
grism_url = grism_url.replace('.fits', '.fits.gz')
else:
grism_url = ''
try:
catalog = glob.glob('{0}-*.cat.fits'.format(root))[0]
except:
catalog = 'xxx'
catroot = catalog.split('.cat.fits')[0]
root_files = glob.glob('{0}-[ioyh]*fits*'.format(root))
root_files.sort()
if gzipped_links:
gzext = '.gz'
else:
gzext = ''
root_urls = '\n '.join(['<a href={0}{1}>{0}{1}</a>'.format(f, gzext) for f in root_files])
body = """
<h4>{root} </h4>
{now}<br>
<a href={root}.exposures.html>Exposure report</a>
/ <a href={root}_expflag.txt>{root}_expflag.txt</a>
/ <a href={root}.auto_script.log.txt>{root}.auto_script.log.txt</a>
/ <a href={root}.auto_script.yml>{root}.auto_script.yml</a>
<pre>
{root_urls}
<a href="{root}_visits.npy">{root}_visits.npy</a>
</pre>
{column}
{grism}
<a href="./{root}.rgb.jpg"><img src="./{root}.rgb.jpg" height=300px></a>
<a href="https://s3.amazonaws.com/grizli-v1/Master/{root}_footprint.png"><img src="https://s3.amazonaws.com/grizli-v1/Master/{root}_footprint.png" height=300px></a>
<a href="./{root}_fine.png"><img src="./{root}_fine.png" height=200px></a>
<br>
""".format(root=root, column=column_url, grism=grism_url, gz='.gz'*(gzipped_links), now=now, catroot=catroot, root_urls=root_urls)
lines = open('{0}.summary.html'.format(root)).readlines()
for i in range(len(lines)):
if '<body>' in lines[i]:
break
lines.insert(i+1, body)
fp = open('{0}.summary.html'.format(root), 'w')
fp.writelines(lines)
fp.close()
def exposure_report(root, log=True):
"""
Save exposure info to webpage & json file
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.exposure_report')
from collections import OrderedDict
import json
# Exposures
visits, all_groups, info = np.load('{0}_visits.npy'.format(root),
allow_pickle=True)
tab = utils.GTable(info)
tab.add_index('FILE')
visit_product = ['']*len(info)
ramp = ['']*len(info)
trails = ['']*len(info)
persnpix = [-1]*len(info)
tab['complete'] = False
flt_dict = OrderedDict()
for visit in visits:
failed = len(glob.glob('{0}*fail*'.format(visit['product']))) > 0
for file in visit['files']:
ix = tab.loc_indices[file]
if os.path.exists(file):
fobj = pyfits.open(file)
fd = utils.flt_to_dict(fobj)
fd['complete'] = not failed
flt_dict[file] = fd
flt_dict['visit'] = visit['product']
if 'PERSNPIX' in fobj[0].header:
persnpix[ix] = fobj[0].header['PERSNPIX']
visit_product[ix] = visit['product']
tab['complete'][ix] = not failed
base = file.split('_')[0]
ramp_file = '../RAW/{0}_ramp.png'.format(base)
has_mask = glob.glob('{0}*mask.reg'.format(base))
if has_mask:
extra = ' style="border:5px solid red;"'
else:
extra = ''
if os.path.exists(ramp_file):
ramp[ix] = '<a href="{0}"><img src="{0}" height=180 {1}></a>'.format(ramp_file, extra)
trails_file = '../RAW/{0}_trails.png'.format(base)
if os.path.exists(trails_file):
trails[ix] = '<a href="{0}"><img src="{0}" height=180 {1}></a>'.format(trails_file, extra)
tab['persnpix'] = persnpix
tab['product'] = visit_product
tab['ramp'] = ramp
tab['trails'] = trails
tab['EXPSTART'].format = '.3f'
tab['EXPTIME'].format = '.1f'
tab['PA_V3'].format = '.1f'
tab['RA_TARG'].format = '.6f'
tab['DEC_TARG'].format = '.6f'
    # Turn the file name into a URL
file_urls = ['<a href="./{0}">{0}</a>'.format(f) for f in tab['FILE']]
tab['FLT'] = file_urls
cols = ['FLT']+tab.colnames[1:-1]
fp = open('{0}_exposures.json'.format(root), 'w')
json.dump(flt_dict, fp)
fp.close()
tab[cols].write_sortable_html('{0}.exposures.html'.format(root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=[], buttons=['csv'], toggle=True, use_json=False)
| mit |
RachitKansal/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
    satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
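# Illustrative usage sketch: a tiny, hypothetical example of how the function
# above is typically called on a feature array with predicted cluster labels.
def _silhouette_usage_example():
    # Two well-separated pairs of points -> mean coefficient close to 1.
    X_demo = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    labels_demo = np.array([0, 0, 1, 1])
    return silhouette_score(X_demo, labels_demo, metric='euclidean')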
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
    satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
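# Note: silhouette_score above simply averages these per-sample values, so for
# a full dataset (no subsampling) np.mean(silhouette_samples(X, labels)) equals
# silhouette_score(X, labels).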
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
Mecanon/morphing_wing | dynamic_model/optimization/DOE_FullFactorial.py | 1 | 23889 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 18:36:06 2015
@author: Pedro Leal
"""
import pickle
import time
import os, sys
import matplotlib.pyplot as plt
import xfoil_module as xf
#adding path to static model
lib_path = os.path.abspath(os.path.join('..'))
sys.path.append(lib_path)
import static_model as model
in_Abaqus = False
class DOE:
"""Create a Design of Experiences Environment."""
def __init__(self, levels=5, driver='Taguchi'):
self.levels = levels
self.driver = driver
# All variable will be defined through the add_variable method
self.variables = []
# For the influence method, we need a list of the names of all variables
self.variables_names = []
def add_variable(self, name, lower, upper, levels=None, type=float):
"""Add variables to the DOE problem. """
if levels is None:
levels = self.levels
try:
self.variables.append({'upper': upper, 'lower': lower, 'name': name,
'levels': levels, 'type': type})
self.variables_names.append(name)
except:
print 'Forgot to define upper, lower or name'
def define_points(self):
"""
Method to define the points to be evaluated based on the results from the
        distribution given by the array method and the bounds defined by the
add_variable method"""
self.n_var = 0
self.n_var_2 = 0
for variable in self.variables:
if variable['levels'] == self.levels:
self.n_var += 1
elif variable['levels'] == 2:
self.n_var_2 += 1
else:
raise Exception('A variable has a number of levels that is ' +
'not the default or 2')
if self.driver == 'Taguchi':
self.Taguchi()
elif self.driver == 'Full Factorial':
self.FullFactorial()
#TODO: Define a FullFactorial Driver or something like that
self.domain = {}
for j in range(self.n_var+self.n_var_2):
upper = self.variables[j]['upper']
lower = self.variables[j]['lower']
levels = self.variables[j]['levels']
type = self.variables[j]['type']
dummy = []
for i in range(self.runs):
scale = self.array[i][j]
if type == int and (scale*(upper-lower) % (levels-1.) != 0):
raise Exception('The bounds of the defined integer are not '+
'compatible with the number of levels.')
else:
dummy.append(lower + scale*(upper-lower) / (levels-1.))
self.domain[self.variables[j]['name']] = dummy
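        # Illustrative note: with lower=0, upper=10 and levels=5, the mapping above
        # (lower + scale*(upper - lower)/(levels - 1)) turns the normalized levels
        # 0..4 into the design points 0, 2.5, 5, 7.5 and 10.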
def run(self, function, inputs = None, cte_input=None,
dependent_variables = None, parameters = None):
"""Runs and saves the results for the configurations obtained in define_points
method.
- inputs: if defined will define the format with which the inputs
will be passed to the main function.
- cte_input : if defined, is a dictionary containing the constant
inputs.
- dependent_variables: if defined, it creates a relationship between
different variables such as {'t_spar':'t_rib'}
- parameters: some main functions need extra parameters. this is a list
"""
DataFile = open('all_data.txt','w')
key_list = ['xs-', 'ys-', 'xs+', 'ys+', 'xl-', 'yl-', 'xl+', 'yl+']
output_list = ['delta_xi', 'theta', 'T', 'k']
for key in ['i'] + key_list + output_list:
DataFile.write(key + '\t')
DataFile.write('\n')
DataFile.close()
def set_input(self, run):
output = {}
for key in self.domain:
output[key] = self.domain[key][run]
return output
for i in range(self.runs):
input = set_input(self,i)
# If there is a constant input, add it to input dictionary
if cte_input != None:
input.update(cte_input)
if dependent_variables != None:
for key_dependent in dependent_variables:
key_independent = dependent_variables[key_dependent]
input.update({key_dependent : input[key_independent]})
DataFile = open('all_data.txt','a')
DataFile.write(str(i))
for key in key_list:
DataFile.write( '\t' + str(input[key]))
DataFile.close()
if inputs != None:
new_input = {}
for key_input in inputs:
if type(inputs[key_input]) == dict:
new_input[key_input] = {}
for key_var in inputs[key_input]:
new_input[key_input][key_var] = input[inputs[key_input][key_var]]
else:
new_input[key_input]= input[inputs[key_input]]
input = new_input
print input
if parameters == None:
result = function(input)
else:
result = function(input, parameters = parameters)
if i == 0:
                # We will save the names of the outputs for plotting etc.
self.output_names = [key for key in result]
self.output = {}
for key in self.output_names:
self.output[key] = []
for key in self.output_names:
self.output[key].append(result[key])
DataFile = open('all_data.txt','a')
for output in output_list:
DataFile.write( '\t' + str(self.output[output][-1]))
DataFile.write('\n')
DataFile.close()
def find_influences(self, not_zero=False):
""" Calculate average influence of each variable over the
objective functions. If refinement_criteria is defined, certain points
will be eliminated. Works for Taguchi, Full Factorial and probably
anything else.
"""
self.influences = {}
# For refinement reasons during plotting, it is relevant to
# know which ones have zeros
self.equal_to_zero = {key:[False]*(self.n_var+self.n_var_2)*self.levels for
key in self.output_names}
for output_name in self.output_names:
Y = self.output[output_name]
# List of outputs
self.influences[output_name] = []
for var in self.variables_names:
X = self.domain[var]
                # Eliminate repetitions by transforming the list into a set and
                # sorting it. unique_X will be used for counting
unique_X = sorted(set(X))
# For each unique X, the average of its values will be calculated
for j in range(len(unique_X)):
indices = [i for i, x in enumerate(X) if x == unique_X[j]]
# Filter option
if not_zero:
# Evaluate if any of the values of output is
# zero
for key, item in self.output.items():
if 0 in item:
# Eliminate it from the list of indices
for i in indices:
if self.output[key][i] == 0:
indices.remove(i)
self.equal_to_zero[key][j] = True
# Count number of times the variable repeats itself
count = len(indices)
                    # Create an empty slot in the influences list to add all Ys
self.influences[output_name].append(0)
# Average of all points with same X value
dummy = 0
for index in indices:
dummy += Y[index]/count
# Add to the last term of Y_DOE (sum of all)
self.influences[output_name][-1] += Y[index]/count
if not in_Abaqus:
def plot(self, shadow = [], xlabel = None, ylabel = None):
import matplotlib.pyplot as plt
def list_to_string(self, separator=', '):
"""Summ all the elements of a list of strings in to a string"""
resultant_string = ''
for component in self.variables_names:
resultant_string += component + separator
# Remove the last separator.
resultant_string = resultant_string[:-len(separator)]
return resultant_string
def create_ticks(self):
                # In Japanese, a mora is the length of a syllable; here it is the tick label used for each level
if self.levels == 2:
mora = ['-', '+']
elif self.levels == 3:
mora = ['-', 'o', '+']
elif self.levels == 4:
mora = ['-', '-o', 'o+', '+']
elif self.levels == 5:
mora = ['-', '-o', 'o', 'o+', '+']
else:
raise Exception('n_range to high, max is 5!')
# Replicate standard for all variables
return (self.n_var_2)*['-', '+'] + (self.n_var)*mora
def subtick_distance(self, border_spacing):
"""Function togenerate the distances of the second x axis
using figtext"""
# normalizing values forimage to be >0 and <1
norm = (2*border_spacing + self.levels*self.n_var - 1)
# Initial proportional distance
x0 = border_spacing/norm
distances = []
for i in range(len(self.variables_names)):
current = x0 + i*(self.levels - 1)/norm
print 'x0', current
if self.levels % 2 == 0: # if even
if i==0:
current += (self.levels)/2./norm
else:
current += (self.levels + 1)/2./norm
else: # if odd
if i == 0:
current += (self.levels/2 )/norm
else:
current += (self.levels/2 +1)/norm
print current
distances.append(current)
return distances
            # If the user wants to add pretty names; if not, just use the
            # variable names
if xlabel == None:
xlabel = self.variables_names
if ylabel == None:
ylabel = self.output_names
ticks = create_ticks(self)
border_spacing = .2
for output in self.output_names:
Y = self.influences[output]
plt.subplot(100*len(self.output_names) + 11 +
self.output_names.index(output))
# Creating dummy values for horizontal axis
xi = range((self.n_var+self.n_var_2) * self.levels)
# Define ticks
plt.xticks(xi, ticks)
# plt.fill_between(xi, min(Y) - 0.05*(max(Y)-min(Y)),
# max(Y) + 0.05*(max(Y)-min(Y)),
# where = self.equal_to_zero[output],color = '0.75')
for i in range(self.n_var+self.n_var_2):
plt.plot(xi[i*self.levels : (i+1) * self.levels],
Y[i*self.levels : (i+1) * self.levels],
'-o')
# if output in shadow:
# plt.plot(xi[i*self.levels : (i+1) * self.levels],
# Y[i*self.levels : (i+1) * self.levels],
# '--',color=plt.getp(line, 'linewidth'))
plt.ylabel(ylabel[self.output_names.index(output)])
# plt.xlabel("Design Variables ("+list_to_string(self)+")")
if self.output_names.index(output) == 0:
plt.title("Design of Experiment: %i level %s" %
(self.levels, self.driver))
plt.xlim([-border_spacing, max(xi) + border_spacing])
plt.ylim(min(Y) - 0.05*(max(Y)-min(Y)),
max(Y) + 0.05*(max(Y)-min(Y)))
plt.grid()
plt.show()
# Create the second x axis
distances = subtick_distance(self, border_spacing)
print distances
for i in range(len(distances)):
plt.annotate(xlabel[i], xy =(distances[i], 0),
xytext = (0, -25), xycoords='axes fraction',
textcoords='offset points', horizontalalignment='center',
verticalalignment='center')
def plot_domain(self, Xaxis, Yaxis):
"""Plots all the points in a 2D plot for the definided Xaxis and
Yaxis
param: Xaxis: string containing key for x axis
param: Yaxis: string containing key for y axis"""
plt.scatter(self.output[Xaxis],self.output[Yaxis])
plt.xlabel(Xaxis)
plt.ylabel(Yaxis)
def load(self, filename, variables_names, outputs_names, header=None):
""" Load data from text file with results of DOE.
TODO: NEED TO INCLUDE POSSIBILITY FOR TWO LEVEL VARIABLE
- input:
- header: If not specified, the first line in the file
will be considered to be the header.
"""
if self.variables_names == []:
if header == None:
Data = xf.output_reader(filename=filename)
else:
Data = xf.output_reader(filename=filename, header=header)
if True==True:
self.output_names = outputs_names
self.variables_names = variables_names
self.n_var = len(variables_names)
self.n_var_2 = 0
self.output = {key:Data[key] for key in self.output_names}
self.domain = {key:Data[key] for key in self.variables_names}
# except:
# raise Exception('Something wrong with variables_names and '+
# 'outputs_names.')
else:
raise Exception('Cannot atribute variables and load data at the' +
'same object.')
# def save(self):
def Taguchi(self):
""" Find the necessary Taguchi array."""
self.runs = 50
Taguchi_L50 = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
[0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
[0, 1, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
[0, 1, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1],
[0, 1, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2],
[0, 1, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3],
[0, 2, 0, 2, 4, 1, 3, 3, 0, 2, 4, 1],
[0, 2, 1, 3, 0, 2, 4, 4, 1, 3, 0, 2],
[0, 2, 2, 4, 1, 3, 0, 0, 2, 4, 1, 3],
[0, 2, 3, 0, 2, 4, 1, 1, 3, 0, 2, 4],
[0, 2, 4, 1, 3, 0, 2, 2, 4, 1, 3, 0],
[0, 3, 0, 3, 1, 4, 2, 4, 2, 0, 3, 1],
[0, 3, 1, 4, 2, 0, 3, 0, 3, 1, 4, 2],
[0, 3, 2, 0, 3, 1, 4, 1, 4, 2, 0, 3],
[0, 3, 3, 1, 4, 2, 0, 2, 0, 3, 1, 4],
[0, 3, 4, 2, 0, 3, 1, 3, 1, 4, 2, 0],
[0, 4, 0, 4, 3, 2, 1, 3, 2, 1, 0, 4],
[0, 4, 1, 0, 4, 3, 2, 4, 3, 2, 1, 0],
[0, 4, 2, 1, 0, 4, 3, 0, 4, 3, 2, 1],
[0, 4, 3, 2, 1, 0, 4, 1, 0, 4, 3, 2],
[0, 4, 4, 3, 2, 1, 0, 2, 1, 0, 4, 3],
[1, 0, 0, 0, 3, 4, 3, 2, 1, 4, 1, 2],
[1, 0, 1, 1, 4, 0, 4, 3, 2, 0, 2, 3],
[1, 0, 2, 2, 0, 1, 0, 4, 3, 1, 3, 4],
[1, 0, 3, 3, 1, 2, 1, 0, 4, 2, 4, 0],
[1, 0, 4, 4, 2, 3, 2, 1, 0, 3, 0, 1],
[1, 1, 0, 1, 0, 2, 2, 1, 3, 4, 4, 3],
[1, 1, 1, 2, 1, 3, 3, 2, 4, 0, 0, 4],
[1, 1, 2, 3, 2, 4, 4, 3, 0, 1, 1, 0],
[1, 1, 3, 4, 3, 0, 0, 4, 1, 2, 2, 1],
[1, 1, 4, 0, 4, 1, 1, 0, 2, 3, 3, 2],
[1, 2, 0, 2, 2, 0, 1, 4, 4, 3, 1, 3],
[1, 2, 1, 3, 3, 1, 2, 0, 0, 4, 2, 4],
[1, 2, 2, 4, 4, 2, 3, 1, 1, 0, 3, 0],
[1, 2, 3, 0, 0, 3, 4, 2, 2, 1, 4, 1],
[1, 2, 4, 1, 1, 4, 0, 3, 3, 2, 0, 2],
[1, 3, 0, 3, 4, 3, 0, 1, 4, 1, 2, 2],
[1, 3, 1, 4, 0, 4, 1, 2, 0, 2, 3, 3],
[1, 3, 2, 0, 1, 0, 2, 3, 1, 3, 4, 4],
[1, 3, 3, 1, 2, 1, 3, 4, 2, 4, 0, 0],
[1, 3, 4, 2, 3, 2, 4, 0, 3, 0, 1, 1],
[1, 4, 0, 4, 1, 1, 4, 2, 3, 3, 2, 0],
[1, 4, 1, 0, 2, 2, 0, 3, 4, 4, 3, 1],
[1, 4, 2, 1, 3, 3, 1, 4, 0, 0, 4, 2],
[1, 4, 3, 2, 4, 4, 2, 0, 1, 1, 0, 3],
[1, 4, 4, 3, 0, 0, 3, 1, 2, 2, 1, 4]
]
# Initialize the Taguchi array.
self.array = self.runs*[[]]
        # The range of each array was defined in:
# https://controls.engin.umich.edu/wiki/index.php/
# Design_of_experiments_via_taguchi_methods:_orthogonal_arrays
if (self.n_var >= 7 and self.n_var <= 12) and self.n_var_2 <= 1:
# Since the first column is for two level variables, we can ignore it.
for i in range(self.runs):
self.array[i] = Taguchi_L50[i][1-self.n_var_2 : self.n_var+1]
def FullFactorial(self):
"""Define array for Full Factorial for a given number of
levels.
"""
def product(*args, **kwds):
""" Returns all the possible combinations beween two lists
or between itself.
>>> print product('ABCD', 'xy')
>>> Ax Ay Bx By Cx Cy Dx Dy
>>> print product(range(2), repeat=3)
>>>000 001 010 011 100 101 110 111
Source: itertools
"""
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
self.array = []
possibilities = [i for i in range(self.levels)]
for subset in product(possibilities, repeat = self.n_var):
self.array.append(subset)
self.runs = len(self.array)
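        # Illustrative note: e.g. 3 variables at 2 levels enumerate all
        # 2**3 = 8 combinations, so self.runs grows as levels**n_var.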
def find_nadir_utopic(self, not_zero=True):
"""Find the minimum and maximum, nadir and utopic, for each
output variable.
This function is quite relevant for normalizing the objective
function for the optimization.
param: not_zero: filters the zero values out.
returns: attributes utopic and nadir dictionaries for the output
variables, each containing a float value
sources: http://stackoverflow.com/questions/16122362/python-matplotlib-how-to-put-text-in-the-corner-of-equal-aspect-figure
"""
# First verify if there are any zeros, if true, get them out
equal_to_zero = {}
for key in self.output_names:
equal_to_zero[key] = [False]*len(self.output[key])
if not_zero:
for i in range(len(self.output[key])):
for key2 in self.output_names:
if self.output[key2][i] == 0:
equal_to_zero[key][i] = True
elif equal_to_zero[key][i] != True:
equal_to_zero[key][i] = False
# Now we can find the nadir and the utopic points
self.nadir = {}
self.utopic = {}
for key in self.output_names:
self.nadir[key] = 0
self.utopic[key] = 99999999999999999.
for i in range(len(self.output[key])):
if equal_to_zero[key][i] != True and self.output[key][i] < self.utopic[key]:
self.utopic[key] = self.output[key][i]
if equal_to_zero[key][i] != True and self.output[key][i] > self.nadir[key]:
self.nadir[key] = self.output[key][i]
if __name__ == "__main__":
import matlab.engine
#Start Matlab engine
eng = matlab.engine.start_matlab()
#Go to directory where matlab file is
eng.cd('..')
eng.cd('SMA_temperature_strain_driven')
chord = 0.6175
x_hinge = 0.25
safety = 0.05*chord
problem = DOE(levels=2, driver='Full Factorial')
problem.add_variable('xs-', lower = x_hinge/2. , upper = x_hinge, type=float)
problem.add_variable('ys-', lower = -.9, upper = -0., type=float)
problem.add_variable('xs+', lower = x_hinge + safety, upper = chord - safety, type=float)
problem.add_variable('ys+', lower = 0., upper = .9, type=float)
problem.add_variable('xl-', lower = x_hinge/2., upper = x_hinge, type=float)
problem.add_variable('yl-', lower = -.9, upper = 0.9, type=float)
problem.add_variable('xl+', lower = x_hinge + safety, upper = chord - safety, type=float)
problem.add_variable('yl+', lower = -.9, upper = 0.9, type=float)
problem.define_points()
#inputs [sma, linear, sigma_o]
inputs = {'sma':{'x-':'xs-', 'y-':'ys-', 'x+':'xs+', 'y+':'ys+'},
'linear':{'x-':'xl-', 'y-':'yl-', 'x+':'xl+', 'y+':'yl+'}}
problem.run(model.run, inputs = inputs, parameters = [eng])
timestr = time.strftime('%Y%m%d')
fileObject = open('DOE_'+ problem.driver + '_' + timestr,'wb')
# fileObject = open('DOE_FullFactorial_20150828','wb')
pickle.dump(problem, fileObject)
fileObject.close()
# problem.load(filename='FullFactorial.txt',
# variables_names= ['Al0', 'Al1'],
# outputs_names = ['Weight', 'Lift', 'Drag', 'MaxMises', 'DispTip', 'EigenValue', 'Velocity'])
problem.find_influences(not_zero=True)
problem.find_nadir_utopic(not_zero=True)
print 'Nadir: ', problem.nadir
print 'Utopic: ', problem.utopic
# problem.plot(xlabel = ['$A_{l_0}$', '$A_{l_1}$'],
# ylabel = ['Weight(N)', 'Lift(N)', 'Drag(N)', 'MaxMises(Pa)',
# 'Displacement(m)','Eigenvalue', 'Velocity(m/s)'])
# print problem.influences
| mit |
SeldonIO/seldon-server | python/seldon/tests/test_xgb.py | 2 | 2515 | import unittest
import pandas as pd
from seldon import xgb
import numpy as np
import sys
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
import logging
class Test_XGBoostClassifier(unittest.TestCase):
def test_set_params(self):
t = xgb.XGBoostClassifier(target="target",learning_rate=0.1,silent=0,objective='binary:logistic')
t.set_params(learning_rate=0.9,gamma=0.1)
self.assertEquals(t.get_params()['learning_rate'],0.9)
def test_sklearn_pipeline(self):
t = xgb.XGBoostClassifier(target="target",learning_rate=0.1,silent=0,objective='binary:logistic')
f1 = {"target":0,"b":1.0,"c":0}
f2 = {"target":1,"b":0,"c":2.0}
fs = []
for i in range (1,50):
fs.append(f1)
fs.append(f2)
print "features=>",fs
df = pd.DataFrame.from_dict(fs)
estimators = [("xgb",t)]
p = Pipeline(estimators)
p.fit(df)
preds = p.predict_proba(df)
print preds
print "-------------------"
joblib.dump(p,"/tmp/pipeline/p")
p2 = joblib.load("/tmp/pipeline/p")
df3 = p2.predict_proba(df)
print df3
def test_create_features(self):
t = xgb.XGBoostClassifier(target="target",learning_rate=0.1,silent=0,objective='binary:logistic')
f1 = {"target":0,"b":1.0,"c":0}
f2 = {"target":1,"b":0,"c":2.0}
fs = []
for i in range (1,50):
fs.append(f1)
fs.append(f2)
print "features=>",fs
df = pd.DataFrame.from_dict(fs)
t.fit(df)
scores = t.predict_proba(df)
print scores.shape
print "scores->",scores[0]
preds = t.predict(df)
print "predictions->",preds[0],preds[1]
self.assertEquals(preds[0],0)
self.assertEquals(preds[1],1)
def test_numpy_input(self):
t = xgb.XGBoostClassifier(n_estimators=10,learning_rate=0.1,silent=0)
X = np.random.randn(6,4)
y = np.array([0,1,1,0,0,1])
t.fit(X,y)
scores = t.predict_proba(X)
print scores
def test_svmlight_features(self):
t = xgb.XGBoostClassifier(target="target",svmlight_feature="svm",learning_rate=0.1,silent=0,objective='binary:logistic')
df = pd.DataFrame([{"svm":[(1,2.0),(2,3.0)],"target":1}])
t.fit(df)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
| apache-2.0 |
astroswego/plotypus | test/demo.py | 1 | 2337 | from sys import exit
import numpy as np
np.random.seed(4) # chosen by fair dice roll. guaranteed to be random.
from sklearn.linear_model import LinearRegression, LassoCV
from sklearn.pipeline import Pipeline
from plotypus.preprocessing import Fourier
from plotypus.utils import colvec
from plotypus.resources import matplotlibrc
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc_file
rc_file(matplotlibrc)
import matplotlib.pyplot as plt
color = True
def lc(X):
return 10 + np.cos(2*np.pi*X) + 0.1*np.cos(18*np.pi*X)
def main():
X_true = np.linspace(0, 1, 1001)
y_true = lc(X_true)
n_samples = 50
X_sample = np.random.uniform(size=n_samples)
y_sample = lc(X_sample) + np.random.normal(0, 0.1, n_samples)
predictor = Pipeline([('Fourier', Fourier(9)),
('OLS', LinearRegression())])
predictor = predictor.fit(colvec(X_sample), y_sample)
y_pred = predictor.predict(colvec(X_true))
predictor = Pipeline([('Fourier', Fourier(9)),
('Lasso', LassoCV())])
predictor = predictor.fit(colvec(X_sample), y_sample)
y_lasso = predictor.predict(colvec(X_true))
ax = plt.gca()
signal, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_true, y_true)),
linewidth=0.66, color='black')
fd, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_pred, y_pred)),
linewidth=2.5, ls='dashed',
color='darkred' if color else 'black')
lasso, = plt.plot(np.hstack((X_true,1+X_true)),
np.hstack((y_lasso, y_lasso)),
linewidth=3, color='black', ls='dotted')
sc = plt.scatter(np.hstack((X_sample,1+X_sample)),
np.hstack((y_sample, y_sample)),
marker='+', s=20,
color='darkblue' if color else 'black')
plt.legend([signal, sc, fd, lasso],
["True Signal", "Noisy Data", "OLS", "Lasso"],
loc='best')
plt.xlim(0,2)
plt.xlabel('Phase')
plt.ylabel('Magnitude')
plt.title('Simulated Lightcurve Example')
plt.tight_layout(pad=0.1)
plt.savefig('demo.eps')
plt.clf()
if __name__ == '__main__':
exit(main())
| gpl-3.0 |
HGladiator/MyCodes | Python/exercise/titanic2.py | 1 | 4696 | # -*- coding: utf-8 -*-
"""
Created on Mon May 1 19:15:53 2017
@author: Isola
"""
'''
This is a fairly typical feature-based classification problem. Following the usual
data-processing workflow, the solution can be broken down into the following steps:
Data preprocessing
    Read the data; this code uses the pandas package to manage the data structures
    Vectorize the features; here the sex and embarkation-port features are converted to numeric values
    Handle missing data; missing ages are filled with the median age, and missing embarkation ports with the most frequent value
    Drop redundant columns; name, ID, cabin and ticket number are assumed to carry no useful information for this classification task and are removed
Model training
    A random forest from sklearn is used for classification; it repeatedly builds decision trees from random subsets of features and samples, and the final prediction is produced by voting
'''
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import os
os.chdir('E:\\coding\\Spyder\\exercise\\titanic')
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('.\\train.csv', header = 0,encoding = 'utf-8') # Load the train file into a dataframe
# Convert strings to integers
# Handle missing values
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked 'C', 'Q', 'S'
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('.\\test.csv', header = 0,encoding = 'utf-8') # Load the test file into a dataframe
pasageId = test_df[['PassengerId']]
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
#print('Training...')
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
#print('Predicting...')
output = forest.predict(test_data).astype(int)
from sklearn.metrics import accuracy_score
# accuracy_score(y_true, output)  # would compute the accuracy, but the Kaggle
# test set provides no ground-truth labels, so y_true is not defined here
df = pd.concat([pd.DataFrame(pasageId),pd.DataFrame(output)], ignore_index=True, axis=1)
df.columns = ['PassengerId','Survived']
df.to_csv('titanicPrediction.csv',index = False)
| mit |
goodwordalchemy/thinkstats_notes_and_exercises | code/hinc_soln.py | 1 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Show(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Show(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
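    # 1.349 is the inter-quartile range of a standard normal distribution
    # (2 * 0.6745), so iqr / 1.349 is a robust estimate of sigma.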
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Show(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
dingocuster/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
Roboticmechart22/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 24 | 2966 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
macks22/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
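# Illustrative note: for class_weight="balanced" the weights follow
# n_samples / (n_classes * bincount(y)); with the class counts (3, 2, 1) above
# this gives roughly [0.67, 1.0, 2.0], consistent with the ordering asserted.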
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
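# For multi-output targets, the expectations above are the element-wise
# product of the per-output weights (hence the squaring when both output
# columns have identical class frequencies). A small illustrative sketch,
# not used by the tests:
def _multioutput_sample_weight_product_sketch(weights_per_output):
    # weights_per_output: sequence of 1-D weight arrays, one per output column.
    return np.prod(np.asarray(weights_per_output), axis=0)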
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 43 | 3449 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
  # Create three fully connected layers of sizes 10, 20, and 10, with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
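# A roughly equivalent canned estimator, shown here only as a hedged sketch
# (it assumes the same TF 1.x tf.estimator / tf.feature_column API that the
# rest of this example already uses and is not part of the original script):
#
#   feature_columns = [tf.feature_column.numeric_column(X_FEATURE, shape=[4])]
#   classifier = tf.estimator.DNNClassifier(
#       feature_columns=feature_columns, hidden_units=[10, 20, 10],
#       dropout=0.1, n_classes=3)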
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
wzbozon/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
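# In addition to ``transduction_`` (plotted below), the fitted LabelSpreading
# model exposes per-class probabilities through ``label_distributions_``.
# A hedged inspection sketch, not part of the original example:
# print(label_spread.label_distributions_[:5])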
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
WarrenWeckesser/scipy | scipy/signal/bsplines.py | 12 | 19509 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
Parameters
----------
Iin : array_like
input data set
lmbda : float, optional
        spline smoothing fall-off value, default is `5.0`.
Returns
-------
res : ndarray
        filtered input data
Examples
--------
    We can filter a multi-dimensional signal (e.g. a 2D image) using a cubic
B-spline filter:
>>> from scipy.signal import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, lmbda=0.1)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = float_factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values
See Also
--------
cubic : A cubic B-spline.
quadratic : A quadratic B-spline.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
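# Partition-of-unity sanity sketch (illustrative only, not part of the public
# API): for any x, the integer-shifted basis functions bspline(x - k, n) sum
# to 1; summing over a range wider than the support just adds zeros.
def _bspline_partition_of_unity_sketch(x=0.3, n=3):
    return sum(bspline(x - k, n) for k in range(-(n + 1), n + 2))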
def gauss_spline(x, n):
r"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values approximated by a zero-mean Gaussian
function.
Notes
-----
The B-spline basis function can be approximated well by a zero-mean
    Gaussian function with variance :math:`\sigma^2 = (n+1)/12` for large `n`:
    .. math::  \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{x^2}{2\sigma^2}\right)
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
.. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
Examples
--------
We can calculate B-Spline basis functions approximated by a gaussian
distribution:
>>> from scipy.signal import gauss_spline, bspline
>>> knots = np.array([-1.0, 0.0, -1.0])
>>> gauss_spline(knots, 3)
array([0.15418033, 0.6909883, 0.15418033]) # may vary
>>> bspline(knots, 3)
array([0.16666667, 0.66666667, 0.16666667]) # may vary
"""
x = asarray(x)
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Cubic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
quadratic : A quadratic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Quadratic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
cubic : A cubic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
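# A few hand-checkable values of the closed-form pieces above (a sketch for
# illustration only): cubic gives 2/3 at 0 and 1/6 at 1, quadratic gives 0.75
# at 0 and 0.125 at 1, matching the docstring examples for bspline(knots, 3)
# and bspline(knots, 2).
def _bspline_value_sanity_sketch():
    x = asarray([0.0, 1.0])
    return cubic(x), quadratic(x)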
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
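# Note on the two helpers above: both implement the standard recursive (IIR)
# prefilter for B-spline interpolation, run causally and then anti-causally.
# The filter poles are the ``zi`` constants used in the loops:
# sqrt(3) - 2 ~= -0.268 for the cubic case and 2*sqrt(2) - 3 ~= -0.172 for
# the quadratic case.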
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
See Also
--------
cspline1d_eval : Evaluate a cubic spline at the new set of points.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
See Also
--------
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
Notes
-----
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a cubic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Parameters
----------
cj : ndarray
        cubic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
        Cubic spline values evaluated at `newx`.
See Also
--------
cspline1d : Compute cubic spline coefficients for rank-1 array.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
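# Interpolation sanity sketch (illustrative only): away from the boundaries,
# evaluating the spline at the original integer knots recombines the
# coefficients exactly like the [1, 4, 1]/6 window mentioned in the
# cspline1d docstring, so the interior samples should be reproduced.
def _cspline_roundtrip_sketch(signal):
    import numpy as np
    signal = np.asarray(signal, dtype=float)
    cj = cspline1d(signal)
    interior = np.arange(1, len(signal) - 1)
    return np.allclose(cspline1d_eval(cj, interior), signal[1:-1])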
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
Parameters
----------
cj : ndarray
Quadratic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
        Quadratic spline values evaluated at `newx`.
See Also
--------
qspline1d : Compute quadratic spline coefficients for rank-1 array.
Notes
-----
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 5 | 49337 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_lr_liblinear_warning():
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
lr = LogisticRegression(solver='liblinear', n_jobs=2)
assert_warns_message(UserWarning,
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = 2.",
lr.fit, iris.data, target)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42),
LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2,
multi_class='ovr', random_state=42)
]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ('Logistic Regression supports only liblinear, newton-cg, '
'lbfgs, sag and saga solvers, got wrong_name')
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ['sag', 'saga']:
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
max_iter=1000,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
solver=solver,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20, random_state=0)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20, random_state=0)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
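# A minimal sketch of the slope-fitting trick used above (illustrative only):
# since d/dt grad(w + t * v) at t = 0 equals the Hessian-vector product H @ v,
# regressing gradients sampled along the segment on t with least squares gives
# a more robust estimate of H @ v than a single finite difference.
def _hess_vec_by_slope_sketch(loss_and_grad, w, v, e=1e-3, n_points=30):
    # loss_and_grad(w) is expected to return (loss, gradient), e.g. a lambda
    # wrapping _logistic_loss_and_grad with fixed X, y and alpha.
    ts = np.linspace(-e, e, n_points)
    grads = np.array([loss_and_grad(w + t * v)[1] for t in ts])
    grads -= grads.mean(axis=0)
    return linalg.lstsq(ts[:, np.newaxis], grads)[0].ravel()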
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1,))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_multinomial_logistic_regression_string_inputs():
# Test with string labels for LogisticRegression(CV)
n_samples, n_features, n_classes = 50, 5, 3
X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
n_classes=n_classes, n_informative=3,
random_state=0)
y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
# For numerical labels, let y values be taken from set (-1, 0, 1)
y = np.array(y) - 1
# Test for string labels
lr = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial')
lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
lr.fit(X_ref, y)
lr_cv.fit(X_ref, y)
lr_str.fit(X_ref, y_str)
lr_cv_str.fit(X_ref, y_str)
assert_array_almost_equal(lr.coef_, lr_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo'])
# The predictions should be in original labels
assert_equal(sorted(np.unique(lr_str.predict(X_ref))),
['bar', 'baz', 'foo'])
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))),
['bar', 'baz', 'foo'])
# Make sure class weights can be given with string labels
lr_cv_str = LogisticRegression(
solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0},
multi_class='multinomial').fit(X_ref, y_str)
assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz'])
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# The cv indices from stratified kfold (where stratification is done based
# on the fine-grained iris classes, i.e, before the classes 0 and 1 are
# conflated) is used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10,))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
max_iter = 2000 if solver in ['sag', 'saga'] else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-5 if solver in ['sag', 'saga'] else 1e-2,
cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10,))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
saga = LogisticRegression(solver='saga', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
saga.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, sag.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(saga.coef_, lib.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-7
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
saga = LogisticRegression(solver='saga', fit_intercept=False, tol=tol,
max_iter=10000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
saga.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, sag.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(saga.coef_, lib.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
n_classes = len(weight)
for class_weight in (weight, 'balanced'):
X, y = make_classification(n_samples=30, n_features=3,
n_repeated=0,
n_informative=3, n_redundant=0,
n_classes=n_classes, random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1,
fit_intercept=False,
class_weight=class_weight)
clf_sag = LogisticRegressionCV(solver='sag', Cs=1,
fit_intercept=False,
class_weight=class_weight,
tol=1e-5, max_iter=10000,
random_state=0)
clf_saga = LogisticRegressionCV(solver='saga', Cs=1,
fit_intercept=False,
class_weight=class_weight,
tol=1e-5, max_iter=10000,
random_state=0)
clf_lbf.fit(X, y)
clf_ncg.fit(X, y)
clf_lib.fit(X, y)
clf_sag.fit(X, y)
clf_saga.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False,
random_state=42)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
random_state=42)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
random_state=42)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2}, random_state=42)
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
# Test the above for l1 penalty and l2 penalty with dual=True.
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1", tol=1e-5, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True, random_state=42)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
random_state=42)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
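# A small illustrative helper (a sketch, not used by the tests above): the
# class_weight / sample_weight equivalence exercised in
# test_logistic_regression_sample_weights holds because both simply scale the
# per-sample loss terms, so class_weight={0: 1, 1: 2} behaves like passing a
# sample_weight of 1 for class-0 samples and 2 for class-1 samples.
def _class_weight_as_sample_weight_sketch(y, class_weight):
    return np.array([class_weight[label] for label in y], dtype=float)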
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20, random_state=0)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as the reference solver
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'saga', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000, tol=1e-7,
)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000, tol=1e-7,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
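# Illustrative sketch (not part of the original suite; the helper name below
# is hypothetical and is not collected as a test): for a smooth objective,
# grad(w + t * v) ~= grad(w) + t * H v, so regressing the centred gradients
# on t with least squares recovers the Hessian-vector product H v, which is
# the trick used in test_multinomial_grad_hess above.
def _example_hessian_column_by_lstsq():
    H = np.array([[2.0, 0.5], [0.5, 1.0]])  # known quadratic f(w) = .5 * w' H w
    def grad(w):
        return H.dot(w)
    w0 = np.array([0.3, -0.2])
    v = np.array([1.0, 0.0])  # probe the first column of H
    ts = np.linspace(-1e-3, 1e-3, 30)
    d_grad = np.array([grad(w0 + t * v) for t in ts])
    d_grad -= d_grad.mean(axis=0)
    approx = linalg.lstsq(ts[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(approx, H.dot(v))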
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver='saga')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_l1():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20,
random_state=0)
X_noise = rng.normal(size=(n_samples, 3))
X_constant = np.ones(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
fit_intercept=False,
tol=1e-10)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20,
random_state=0)
X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
X_constant = np.zeros(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
X[X < 1] = 0
X = sparse.csr_matrix(X)
lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
fit_intercept=False,
tol=1e-10)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
# Check that solving on the sparse and dense data yield the same results
lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga',
fit_intercept=False,
max_iter=1000, tol=1e-10)
lr_saga_dense.fit(X.toarray(), y)
assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
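# Hedged sketch (hypothetical helper, not in the original suite): the softmax
# turns the decision scores into a proper probability distribution directly,
# whereas the OvR path applies a per-class logistic function and then
# renormalises, which in general yields a different distribution for
# multiclass problems -- hence the log-loss comparison above.
def _example_softmax_vs_ovr_probabilities():
    scores = np.array([[2.0, 1.0, 0.1]])
    softmax = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)
    sigmoid = 1.0 / (1.0 + np.exp(-scores))
    ovr = sigmoid / sigmoid.sum(axis=1, keepdims=True)
    assert_almost_equal(softmax.sum(), 1.0)
    assert_almost_equal(ovr.sum(), 1.0)
    # The two renormalised distributions do not coincide in general.
    assert np.abs(softmax - ovr).max() > 1e-3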
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag', 'saga'):
            continue
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag', 'saga', 'lbfgs']
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
def test_saga_vs_liblinear():
iris = load_iris()
X, y = iris.data, iris.target
X = np.concatenate([X] * 10)
y = np.concatenate([y] * 10)
X_bin = X[y <= 1]
y_bin = y[y <= 1] * 2 - 1
X_sparse, y_sparse = make_classification(n_samples=50, n_features=20,
random_state=0)
X_sparse = sparse.csr_matrix(X_sparse)
for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
for penalty in ['l1', 'l2']:
n_samples = X.shape[0]
# alpha=1e-3 is time consuming
for alpha in np.logspace(-1, 1, 3):
saga = LogisticRegression(
C=1. / (n_samples * alpha),
solver='saga',
multi_class='ovr',
max_iter=200,
fit_intercept=False,
penalty=penalty, random_state=0, tol=1e-24)
liblinear = LogisticRegression(
C=1. / (n_samples * alpha),
solver='liblinear',
multi_class='ovr',
max_iter=200,
fit_intercept=False,
penalty=penalty, random_state=0, tol=1e-24)
saga.fit(X, y)
liblinear.fit(X, y)
# Convergence for alpha=1e-3 is very slow
assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
def test_dtype_match():
# Test that np.float32 input data is not cast to np.float64 when possible
X_32 = np.array(X).astype(np.float32)
y_32 = np.array(Y1).astype(np.float32)
X_64 = np.array(X).astype(np.float64)
y_64 = np.array(Y1).astype(np.float64)
X_sparse_32 = sp.csr_matrix(X, dtype=np.float32)
for solver in ['newton-cg']:
for multi_class in ['ovr', 'multinomial']:
# Check type consistency
lr_32 = LogisticRegression(solver=solver, multi_class=multi_class)
lr_32.fit(X_32, y_32)
assert_equal(lr_32.coef_.dtype, X_32.dtype)
# check consistency with sparsity
lr_32_sparse = LogisticRegression(solver=solver,
multi_class=multi_class)
lr_32_sparse.fit(X_sparse_32, y_32)
assert_equal(lr_32_sparse.coef_.dtype, X_sparse_32.dtype)
# Check accuracy consistency
lr_64 = LogisticRegression(solver=solver, multi_class=multi_class)
lr_64.fit(X_64, y_64)
assert_equal(lr_64.coef_.dtype, X_64.dtype)
assert_almost_equal(lr_32.coef_, lr_64.coef_.astype(np.float32))
| bsd-3-clause |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/kwiklib/dataio/tests/test_kwikloader.py | 2 | 6909 | """Unit tests for loader module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from collections import Counter
import numpy as np
import numpy.random as rnd
import pandas as pd
import shutil
from nose.tools import with_setup
from mock_data import setup as setup_klusters
from mock_data import (teardown, TEST_FOLDER, nspikes, nclusters, nsamples,
nchannels, fetdim)
from kwiklib.dataio import (KwikLoader, Experiment, klusters_to_kwik,
check_dtype, check_shape, get_array, select, get_indices)
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
def setup():
setup_klusters()
klusters_to_kwik(filename='test', dir=TEST_FOLDER)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_kwik_loader_1():
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KwikLoader(filename=xmlfile)
# Get full data sets.
features = l.get_features()
# features_some = l.get_some_features()
masks = l.get_masks()
waveforms = l.get_waveforms()
clusters = l.get_clusters()
spiketimes = l.get_spiketimes()
nclusters = len(Counter(clusters))
# probe = l.get_probe()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
cluster_sizes = l.get_cluster_sizes()
# Check the shape of the data sets.
# ---------------------------------
assert check_shape(features, (nspikes, nchannels * fetdim + 1))
# assert features_some.shape[1] == nchannels * fetdim + 1
assert check_shape(masks, (nspikes, nchannels * fetdim + 1))
assert check_shape(waveforms, (nspikes, nsamples, nchannels))
assert check_shape(clusters, (nspikes,))
assert check_shape(spiketimes, (nspikes,))
# assert check_shape(probe, (nchannels, 2))
assert check_shape(cluster_colors, (nclusters,))
assert check_shape(cluster_groups, (nclusters,))
assert check_shape(group_colors, (4,))
assert check_shape(group_names, (4,))
assert check_shape(cluster_sizes, (nclusters,))
# Check the data type of the data sets.
# -------------------------------------
assert check_dtype(features, np.float32)
assert check_dtype(masks, np.float32)
# HACK: Panel has no dtype(s) attribute
# assert check_dtype(waveforms, np.float32)
assert check_dtype(clusters, np.int32)
assert check_dtype(spiketimes, np.float64)
# assert check_dtype(probe, np.float32)
assert check_dtype(cluster_colors, np.int32)
assert check_dtype(cluster_groups, np.int32)
assert check_dtype(group_colors, np.int32)
assert check_dtype(group_names, object)
assert check_dtype(cluster_sizes, np.int32)
l.close()
def test_kwik_loader_control():
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KwikLoader(filename=xmlfile)
# Take all spikes in cluster 3.
spikes = get_indices(l.get_clusters(clusters=3))
# Put them in cluster 4.
l.set_cluster(spikes, 4)
spikes_new = get_indices(l.get_clusters(clusters=4))
# Ensure all spikes in old cluster 3 are now in cluster 4.
assert np.all(np.in1d(spikes, spikes_new))
# Change cluster groups.
clusters = [2, 3, 4]
group = 0
l.set_cluster_groups(clusters, group)
groups = l.get_cluster_groups(clusters)
assert np.all(groups == group)
# Change cluster colors.
clusters = [2, 3, 4]
color = 12
l.set_cluster_colors(clusters, color)
colors = l.get_cluster_colors(clusters)
assert np.all(colors == color)
# Change group name.
group = 0
name = l.get_group_names(group)
name_new = 'Noise new'
assert name == 'Noise'
l.set_group_names(group, name_new)
assert l.get_group_names(group) == name_new
# Change group color.
groups = [1, 2]
colors = l.get_group_colors(groups)
color_new = 10
l.set_group_colors(groups, color_new)
assert np.all(l.get_group_colors(groups) == color_new)
# Add cluster and group.
spikes = get_indices(l.get_clusters(clusters=3))[:10]
# Create new group 100.
l.add_group(100, 'New group', 10)
# Create new cluster 10000 and put it in group 100.
l.add_cluster(10000, 100, 10)
# Put some spikes in the new cluster.
l.set_cluster(spikes, 10000)
clusters = l.get_clusters(spikes=spikes)
assert np.all(clusters == 10000)
groups = l.get_cluster_groups(10000)
assert groups == 100
l.set_cluster(spikes, 2)
# Remove the new cluster and group.
l.remove_cluster(10000)
l.remove_group(100)
assert np.all(~np.in1d(10000, l.get_clusters()))
assert np.all(~np.in1d(100, l.get_cluster_groups()))
l.close()
@with_setup(setup)
def test_kwik_save():
"""WARNING: this test should occur at the end of the module since it
changes the mock data sets."""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KwikLoader(filename=xmlfile)
clusters = l.get_clusters()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
# Set clusters.
indices = get_indices(clusters)
l.set_cluster(indices[::2], 2)
l.set_cluster(indices[1::2], 3)
# Set cluster info.
cluster_indices = l.get_clusters_unique()
l.set_cluster_colors(cluster_indices[::2], 10)
l.set_cluster_colors(cluster_indices[1::2], 20)
l.set_cluster_groups(cluster_indices[::2], 1)
l.set_cluster_groups(cluster_indices[1::2], 0)
# Save.
l.remove_empty_clusters()
l.save()
clusters = l.get_clusters()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
assert np.all(clusters[::2] == 2)
assert np.all(clusters[1::2] == 3)
assert np.all(cluster_colors[::2] == 10)
assert np.all(cluster_colors[1::2] == 20)
print cluster_groups
assert np.all(cluster_groups[::2] == 1)
assert np.all(cluster_groups[1::2] == 0)
l.close()
| gpl-3.0 |
bhargav/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 25 | 45729 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
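# Illustrative sketch (not part of the original suite; the helper name is
# hypothetical and not collected as a test): _weight_func mirrors
# ``lambda d: d ** -2`` for nonzero distances but maps a zero distance to an
# infinite weight instead of raising, which is why it is used as the
# replacement throughout these tests.
def _example_weight_func_behaviour():
    dist = np.array([[0.5, 2.0, 0.0]])
    weights = _weight_func(dist)
    # For nonzero distances this matches d ** -2 ...
    assert_array_almost_equal(weights[:, :2], dist[:, :2] ** -2)
    # ... and the zero distance becomes an infinite weight, not an error.
    assert np.isinf(weights[0, 2])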
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
            else:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit nearest-neighbor classifiers and regressors on the full dataset
    # and check that predictions on the training data are accurate.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
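# Minimal sketch of the overflow mentioned above (hypothetical helper, not
# part of the original suite): squared differences computed in uint8 wrap
# around modulo 256, so naive distance computations on uint8 data can
# silently underestimate distances.
def _example_uint8_overflow():
    a = np.array([200], dtype='uint8')
    b = np.array([50], dtype='uint8')
    diff = a - b            # 150, still representable in uint8
    squared = diff * diff   # 150 * 150 = 22500 wraps to 22500 % 256 == 228
    assert squared[0] == 22500 % 256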
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        # cls is a class here, not an instance, so use issubclass.
        if (issubclass(cls, neighbors.KNeighborsClassifier) or
                issubclass(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()  # sort in place; np.sort alone would discard the result
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
imaculate/scikit-learn | sklearn/utils/validation.py | 15 | 25983 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
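# Illustrative sketch (hypothetical helper, not part of the public API): the
# fast path above relies on summation propagating NaN and on an overflowing
# sum becoming inf, so ``np.isfinite(X.sum())`` being True proves that every
# element is finite without allocating an intermediate boolean array.
def _example_finiteness_fast_path():
    clean = np.array([1.0, 2.0, 3.0])
    dirty = np.array([1.0, np.nan, 3.0])
    assert np.isfinite(clean.sum())      # O(n) time, O(1) extra memory
    assert not np.isfinite(dirty.sum())  # NaN propagates through the sum
    # The slower np.isfinite(X).all() fallback guards against the rare case
    # where a sum of finite values overflows to inf.
    huge = np.array([np.finfo(np.float64).max] * 2)
    with np.errstate(over='ignore'):
        assert not np.isfinite(huge.sum()) and np.isfinite(huge).all()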
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
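# A small usage sketch under stated assumptions (hypothetical helper, not
# part of the public examples): integer input is upcast to a floating dtype,
# while float32/float64 input is passed through, and copy=False may still
# copy when a dtype conversion is required.
def _example_as_float_array_usage():
    ints = np.array([[1, 2], [3, 4]], dtype=np.int64)
    floats32 = np.array([[1.0, 2.0]], dtype=np.float32)
    assert as_float_array(ints).dtype == np.float64
    assert as_float_array(floats32).dtype == np.float32
    assert as_float_array(ints, copy=False).dtype == np.float64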
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-indexable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
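# Illustrative usage sketch (added; not part of the original module): objects
# that already support indexing pass through unchanged, sparse matrices are
# converted to CSR, and check_consistent_length rejects mismatched lengths.
# >>> X, y = indexable([[1], [2], [3]], np.array([0, 1, 0]))
# >>> len(X), y.shape
# (3, (3,))
# >>> indexable([1, 2, 3], [4, 5])
# Traceback (most recent call last):
#     ...
# ValueError: Found arrays with inconsistent numbers of samples: [2 3]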
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first listed type is
        only performed if the dtype of the input is not already in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
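# Illustrative usage sketch (added; not part of the original module): nested
# Python lists are validated and returned as a 2D numpy array.
# >>> A = check_array([[1., 2.], [3., 4.]])
# >>> A.shape
# (2, 2)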
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
    Checks X and y for consistent length, enforces X to be 2D and y to be 1D.
    X is validated with check_array, and standard input checks are also
    applied to y, such as checking that y does not have np.nan or np.inf
    targets. For multi-label y, set multi_output=True to allow 2D and sparse
    y. If the dtype of X is object, attempt converting to float, raising on
    failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first listed type is
        only performed if the dtype of the input is not already in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
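# Illustrative usage sketch (added; not part of the original module): X is
# returned as a 2D array and y as a 1D vector of matching length.
# >>> X, y = check_X_y([[1.], [2.], [3.]], [0, 1, 0])
# >>> X.shape, y.shape
# ((3, 1), (3,))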
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
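# Illustrative usage sketch (added; not part of the original module): a
# column vector is flattened; any other 2D shape raises ValueError.
# >>> column_or_1d(np.array([[1], [2], [3]]))
# array([1, 2, 3])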
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
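# Illustrative usage sketch (added; not part of the original module): an int
# seeds a new RandomState, while an existing RandomState passes through.
# >>> rng = check_random_state(42)
# >>> check_random_state(rng) is rng
# True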
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
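# Illustrative usage sketch (added; not part of the original module): an
# asymmetric matrix is symmetrized by averaging with its transpose.
# >>> S = check_symmetric(np.array([[0., 2.], [0., 0.]]), raise_warning=False)
# >>> np.allclose(S, S.T)
# True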
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
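# Illustrative usage sketch (added; not part of the original module): the
# `whom` string only labels the error message; non-negative data passes.
# >>> check_non_negative(np.array([[0, 1], [2, 3]]), "example caller")  # ok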
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
laurendiperna/data-science-from-scratch | code/linear_algebra.py | 49 | 3637 | # -*- coding: iso-8859-15 -*-
from __future__ import division # want 3 / 2 == 1.5
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial, reduce  # reduce: built-in on Py2, imported from functools for Py3
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
return [v_i + w_i for v_i, w_i in zip(v,w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
return [v_i - w_i for v_i, w_i in zip(v,w)]
def vector_sum(vectors):
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
return [c * v_i for v_i in v]
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
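# Illustrative usage sketch (added; not part of the original chapter code):
# the vector helpers compose as ordinary list operations.
# >>> vector_add([1, 2], [3, 4])
# [4, 6]
# >>> dot([1, 2], [3, 4])
# 11
# >>> distance([0, 0], [3, 4])
# 5.0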
#
# functions for working with matrices
#
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
return A[i]
def get_column(A, j):
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0,0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0 ,0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v,w,o),marker='.')
plt.axis('equal')
plt.show()
| unlicense |
exepulveda/swfc | python/clustering_2d.py | 1 | 4721 | import sys
import random
import logging
import collections
import math
import argparse
sys.path += ['..']
import clusteringlib as cl
import numpy as np
import scipy.stats
import clustering_ga
from scipy.spatial.distance import pdist
from sklearn.cluster import KMeans
from cluster_utils import fix_weights
CHECK_VALID = False
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-t', "--target", action='store_true', dest="target", required=False, help="use target distance on recovery")
parser.add_argument('-f', "--force", action='store_true', dest="force", required=False, help="force weights")
from case_study_2d import attributes,setup_case_study,setup_distances
if __name__ == "__main__":
args = parser.parse_args()
locations,ore_indices,locations_ore,data_ore,min_values,max_values,scale,var_types,categories = setup_case_study()
data = data_ore.copy()
N,ND = data.shape
m = 2.0
force=None
#targets = np.asfortranarray(np.percentile(values[:,-1], [15,50,85]),dtype=np.float32)
targets = None
print('targets',targets)
seed = 1634120
verbose=1
lambda_value = 0.20
ngen=100
npop=200
cxpb=0.8
mutpb=0.4
stop_after=10
filename_template = "../results/final_2d_{tag}_fc_{nc}.csv"
fc_clusters_all = np.empty(len(locations))
NC = 4
np.random.seed(seed)
random.seed(seed)
cl.utils.set_seed(seed)
setup_distances(scale,var_types,categories,targets=targets)
#initial centroids
kmeans_method = KMeans(n_clusters=NC,random_state=seed)
kmeans_method.fit(data)
current_centroids = np.asfortranarray(np.empty((NC,ND)))
current_centroids[:,:] = kmeans_method.cluster_centers_
for i in range(ND):
current_centroids[:,i] = np.clip(current_centroids[:,i],min_values[i],max_values[i])
#initial weights are uniform
weights = np.asfortranarray(np.ones((NC,ND),dtype=np.float32)/ ND)
if args.target:
for c in range(NC):
weights[c,:] = fix_weights(weights[c,:],force=force)
for k in range(100):
best_centroids,best_u,best_energy_centroids,best_jm,current_temperature,evals = clustering_ga.optimize_centroids(
data,
current_centroids,
weights,
m,
lambda_value,
var_types,
{},
ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after,
min_values = min_values,
max_values = max_values,
verbose=verbose)
#print("centroids",best_centroids,best_energy_centroids,"jm",best_jm)
u = best_u
N,NC = u.shape
clusters = np.argmax(u,axis=1)
centroids = best_centroids.copy()
#print("centroids",centroids)
#print("u",u)
counter = collections.Counter(clusters)
#print("number of clusters: ",counter.most_common())
best_weights,best_u,best_energy_weights,evals = clustering_ga.optimize_weights(
data,
centroids,
weights,
m,
lambda_value,
ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after,
force=force,
verbose=verbose)
clusters = np.argmax(best_u,axis=1)
weights = best_weights.copy()
current_centroids = best_centroids.copy()
#print(lambda_value,k,best_energy_centroids,best_energy_weights,"jm",best_jm)
#print('2D FC iteration',k,best_energy_centroids,best_energy_weights)
if abs(best_energy_centroids - best_energy_weights) < 1e-2:
break
centroid = np.asfortranarray(best_centroids,dtype=np.float32)
weights = np.asfortranarray(best_weights,dtype=np.float32)
clusters = np.asfortranarray(clusters,dtype=np.int8)
ret_fc = cl.clustering.dbi_index(centroid,data,clusters,weights)
ret_sill= cl.clustering.silhouette_index(data,clusters,weights)
print("2D FC Index:",NC,ret_fc,ret_sill,sep=',')
cl.distances.reset()
#save data
np.savetxt(filename_template.format(tag='clusters',nc=NC),clusters,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='centroids',nc=NC),current_centroids,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='u',nc=NC),best_u,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='weights',nc=NC),best_weights,delimiter=",",fmt="%.4f")
| gpl-3.0 |
powellb/seapy | seapy/model/grid.py | 1 | 30616 | #!/usr/bin/env python
"""
grid
This module handles general model grid information, whether from ROMS or
other models; however, it is mostly geared towards ROMS
Written by Brian Powell on 10/09/13
Copyright (c)2010--2021 University of Hawaii under the MIT-License.
**Examples**
>>> grid = seapy.model.asgrid("grid_file.nc")
"""
import os
import re
import seapy
import numpy as np
import scipy.spatial
import matplotlib.path
import netCDF4
from warnings import warn
def asgrid(grid):
"""
    Return either an existing or new grid object. This helper ensures that
the variable being used is a seapy.model.grid. If it is not, it will attempt
to construct a new grid with the variable passed.
Parameters
----------
grid: string, list, netCDF4 Dataset, or model.seapy.grid
Input variable to cast. If it is already a grid, it will return it;
otherwise, it attempts to construct a new grid.
Returns
-------
seapy.model.grid
"""
if grid is None:
raise AttributeError("No grid was specified")
if isinstance(grid, seapy.model.grid):
return grid
if isinstance(grid, netCDF4._netCDF4.Dataset):
return seapy.model.grid(nc=grid)
else:
return seapy.model.grid(filename=grid)
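# Illustrative usage sketch (added; not part of the original module): asgrid
# also accepts an already-open netCDF4 Dataset; the path below is a
# placeholder for a real ROMS grid file.
# >>> g = asgrid(netCDF4.Dataset("grid_file.nc"))  # doctest: +SKIP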
class grid:
def __init__(self, filename=None, nc=None, lat=None, lon=None, z=None,
depths=True, cgrid=False):
"""
Class to wrap around a numerical model grid for oceanography.
It attempts to track latitude, longitude, z, and other
parameters. A grid can be constructed by specifying a filename or
by specifying lat, lon, and z.
Parameters
----------
filename: filename or list, optional
name to load to build data structure
or
nc: netCDF4 Dataset, optional
If a file is already open, pass the reference.
lat: ndarray,
latitude values of grid
lon: ndarray,
longitude values of grid
z : ndarray,
z-level depths of grid
Options
-------
        depths: bool,
            If True, compute the depths of the grid [True]
cgrid: bool,
Whether the grid is an Arakawa C-Grid [False]
"""
self.filename = filename
self.cgrid = cgrid
self._nc = nc
if (self.filename or self._nc) is not None:
self._initfile()
self._isroms = True if \
(len(list(set(("s_rho", "pm", "pn", "theta_s", "theta_b",
"vtransform", "vstretching")).intersection(
set(self.__dict__)))) > 0) else False
self.cgrid = True if self._isroms else self.cgrid
else:
self._nc = None
self._isroms = False
self.lat_rho = lat
self.lon_rho = lon
self.z = z
self.cgrid = False
self._verify_shape()
if depths:
self.set_dims()
self.set_depth()
self.set_thickness()
self.set_mask_h()
self.ijinterp = None
self.llinterp = None
def _initfile(self):
"""
Using an input file, try to load as much information
as can be found in the given file.
Parameters
----------
None
Returns
-------
None : sets attributes in grid
"""
# Define a dictionary to go through and convert netcdf variables
# to internal class attributes
gvars = {"lat_rho": ["lat_rho", "lat", "latitude", "y_rho", "geolat_t"],
"lon_rho": ["lon_rho", "lon", "longitude", "x_rho", "geolon_t"],
"lat_u": ["lat_u", "y_u", "geolat_u"],
"lon_u": ["lon_u", "x_u", "geolon_u"],
"lat_v": ["lat_v", "y_v", "geolat_u"],
"lon_v": ["lon_v", "x_v", "geolon_u"],
"mask_rho": ["mask_rho", "mask"],
"mask_u": ["mask_u"],
"mask_v": ["mask_v"],
"angle": ["angle"],
"h": ["h"],
"n": ["n"],
"theta_s": ["theta_s"],
"theta_b": ["theta_b"],
"tcline": ["tcline"],
"hc": ["hc"],
"vtransform": ["vtransform"],
"vstretching": ["vstretching"],
"s_rho": ["s_rho"],
"cs_r": ["cs_r"],
"f": ["f"],
"pm": ["pm"],
"pn": ["pn"],
"z": ["z", "depth", "lev", "st_ocean"],
"wtype_grid": ["mask_rho"],
"rdrag": ["rdrag"],
"rdrag2": ["rdrag2"],
"diff_factor": ["diff_factor"],
"visc_factor": ["visc_factor"]
}
# Open the file
close = False
if self._nc is None:
close = True
self._nc = seapy.netcdf(self.filename)
try:
self.name = re.search("[^\.]*",
os.path.basename(self.filename)).group()
except:
self.name = "untitled"
self.key = {}
ncvars = {v.lower(): v for v in self._nc.variables.keys()}
for var in gvars:
for inp in gvars[var]:
if inp in ncvars:
self.key[var] = inp
self.__dict__[var] = self._nc.variables[ncvars[inp]][:]
break
if close:
# Close the file
self._nc.close()
self._nc = None
def _verify_shape(self):
"""
Verify the dimensionality of the system, create variables that
can be generated from the others if they aren't already loaded
Parameters
----------
None
Returns
-------
None : sets attributes in grid
"""
# Check that we have the minimum required data
        if "lat_rho" not in self.__dict__ or "lon_rho" not in self.__dict__:
raise AttributeError(
"grid does not have attribute lat_rho or lon_rho")
# Check that it is formatted into 2-D
self.spatial_dims = self.lat_rho.ndim
if self.lat_rho.ndim == 1 and self.lon_rho.ndim == 1:
[self.lon_rho, self.lat_rho] = np.meshgrid(self.lon_rho,
self.lat_rho)
# Compute the dimensions
self.ln = int(self.lat_rho.shape[0])
self.lm = int(self.lat_rho.shape[1])
self.shape = (self.ln, self.lm)
if self.cgrid:
self.shape_u = (self.ln, self.lm - 1)
self.shape_v = (self.ln - 1, self.lm)
else:
self.shape_u = self.shape_v = self.shape
def __repr__(self):
return "{:s}: {:d}x{:d}x{:d}".format("C-Grid" if self.cgrid
else "A-Grid", self.n, self.ln, self.lm)
def __str__(self):
return "\n".join((self.filename if self.filename else "Constructed",
"{:d}x{:d}x{:d}: {:s} with {:s}".format(
self.n, self.ln, self.lm,
"C-Grid" if self.cgrid else "A-Grid",
"S-level" if self._isroms else "Z-Level"),
"Available: " + ",".join(sorted(
list(self.__dict__.keys())))))
def east(self):
"""
Test the longitude convention of the grid. If there are negative
values, then east is False. If there are only positive values then
assume that east is True.
Parameters
----------
None
Returns
-------
east : bool,
True - The convention is all positive values to the east
False - The convention is positive values east and negative west
"""
return np.min(self.lon_rho > 0)
def set_east(self, east=False):
"""
When working with various other grids, we may want the longitudes
to be consistent. This can be changed by setting the east to be
either True or False. If False, then longitudes will be positive east
and negative west. If True, only positive east.
Parameters
----------
east : bool,
True - all longitudes are positive
False - longitudes are positive east and negative west
Returns
-------
None : sets attributes in grid
"""
try:
if east:
self.lon_rho[self.lon_rho < 0] += 360.0
self.lon_u[self.lon_u < 0] += 360.0
self.lon_v[self.lon_v < 0] += 360.0
else:
self.lon_rho[self.lon_rho > 180] -= 360.0
self.lon_u[self.lon_u > 180] -= 360.0
self.lon_v[self.lon_v > 180] -= 360.0
except:
pass
def set_dims(self):
"""
Compute the dimension attributes of the grid based upon the information provided.
Parameters
----------
None
Returns
-------
None : sets attributes in grid
"""
# If C-Grid, set the dimensions for consistency
if self.cgrid:
self.eta_rho = self.ln
self.eta_u = self.ln
self.eta_v = self.ln - 1
self.xi_rho = self.lm
self.xi_u = self.lm - 1
self.xi_v = self.lm
# Set the number of layers
if "n" not in self.__dict__:
if "s_rho" in self.__dict__:
self.n = int(self.s_rho.size)
elif "z" in self.__dict__:
self.n = int(self.z.size)
else:
self.n = 1
self.z = np.zeros(self.lat_rho.shape)
else:
self.n = int(self.n)
# Generate the u- and v-grids
        if "lat_u" not in self.__dict__ or "lon_u" not in self.__dict__:
if self.cgrid:
                self.lat_u = 0.5 * \
                    (self.lat_rho[:, 1:] + self.lat_rho[:, 0:-1])
                self.lon_u = 0.5 * \
                    (self.lon_rho[:, 1:] + self.lon_rho[:, 0:-1])
else:
self.lat_u = self.lat_rho
self.lon_u = self.lon_rho
        if "lat_v" not in self.__dict__ or "lon_v" not in self.__dict__:
if self.cgrid:
                self.lat_v = 0.5 * \
                    (self.lat_rho[1:, :] + self.lat_rho[0:-1, :])
                self.lon_v = 0.5 * \
                    (self.lon_rho[1:, :] + self.lon_rho[0:-1, :])
else:
self.lat_v = self.lat_rho
self.lon_v = self.lon_rho
if "mask_rho" in self.__dict__:
if "mask_u" not in self.__dict__:
if self.cgrid:
self.mask_u = self.mask_rho[:, 1:] * self.mask_rho[:, 0:-1]
else:
self.mask_u = self.mask_rho
if "mask_v" not in self.__dict__:
if self.cgrid:
self.mask_v = self.mask_rho[1:, :] * self.mask_rho[0:-1, :]
else:
self.mask_v = self.mask_rho
# Compute the resolution
if "pm" in self.__dict__:
self.dm = 1.0 / self.pm
else:
self.dm = np.ones(self.lon_rho.shape, dtype=np.float32)
self.dm[:, 0:-1] = seapy.earth_distance(self.lon_rho[:, 1:],
self.lat_rho[:, 1:],
self.lon_rho[:, 0:-1],
self.lat_rho[:, 0:-1]).astype(np.float32)
self.dm[:, -1] = self.dm[:, -2]
self.pm = 1.0 / self.dm
if "pn" in self.__dict__:
self.dn = 1.0 / self.pn
else:
self.dn = np.ones(self.lat_rho.shape, dtype=np.float32)
self.dn[0:-1, :] = seapy.earth_distance(self.lon_rho[1:, :],
self.lat_rho[1:, :],
self.lon_rho[0:-1, :],
self.lat_rho[0:-1, :]).astype(np.float32)
self.dn[-1, :] = self.dn[-2, :]
self.pn = 1.0 / self.dn
# Compute the Coriolis
if "f" not in self.__dict__:
omega = 2 * np.pi * seapy.secs2day
self.f = 2 * omega * np.sin(np.radians(self.lat_rho))
# Set the grid index coordinates
self.I, self.J = np.meshgrid(
np.arange(0, self.lm), np.arange(0, self.ln))
def set_mask_h(self, fld=None):
"""
Compute the mask and h array from a z-level model
Parameters
----------
fld : np.array
3D array of values (such as temperature) to analyze to determine
where the bottom and land lie
Returns
-------
None : sets mask and h attributes in grid
"""
if hasattr(self, "mask_rho") or self.cgrid:
return
if fld is None and self.filename is not None:
if self._nc is None:
self._nc = seapy.netcdf(self.filename)
# Try to load a field from the file
for f in ["temp", "temperature", "water_temp", "fed"]:
if f in self._nc.variables:
fld = self._nc.variables[f][0, :, :, :]
fld = np.ma.array(fld, mask=np.isnan(fld))
break
# Close the file
self._nc.close()
# If we don't have a field to examine, then we cannot compute the
# mask and bathymetry
if fld is None:
warn("Missing 3D field to evaluate.")
return
# Next, we go over the field to examine the depths and mask
self.h = np.zeros(self.lat_rho.shape)
self.mask_rho = np.zeros(self.lat_rho.shape)
for k in range(self.z.size):
water = np.nonzero(np.logical_not(fld.mask[k, :, :]))
self.h[water] = self.z[k]
if k == 0:
self.mask_rho[water] = 1.0
self.mask_u = self.mask_v = self.mask_rho
def set_depth(self, force=False):
"""
Compute the depth of each cell for the model grid.
Parameters
----------
force : boolean, default False
If True, force the update of the depths
Returns
-------
None : sets depth attributes in grid
"""
try:
if self._isroms:
if "s_rho" not in self.__dict__ or \
"cs_r" not in self.__dict__ or force:
self.s_rho, self.cs_r = seapy.roms.stretching(
self.vstretching, self.theta_s, self.theta_b,
self.hc, self.n)
self.depth_rho = seapy.roms.depth(
self.vtransform, self.h, self.hc, self.s_rho, self.cs_r)
self.depth_u = seapy.model.rho2u(self.depth_rho).filled(0)
self.depth_v = seapy.model.rho2v(self.depth_rho).filled(0)
else:
d = self.z.copy()
l = np.nonzero(d > 0)
d[l] = -d[l]
if self.n > 1:
self.depth_rho = np.kron(np.kron(
d, np.ones(self.lon_rho.shape[1])),
np.ones(self.lon_rho.shape[0])).reshape(
[self.z.size, self.lon_rho.shape[0],
self.lon_rho.shape[1]])
else:
self.depth_rho = self.z
if self.cgrid:
self.depth_u = seapy.model.rho2u(self.depth_rho).filled(0)
self.depth_v = seapy.model.rho2v(self.depth_rho).filled(0)
else:
self.depth_u = self.depth_rho
self.depth_v = self.depth_rho
except (AttributeError, ValueError):
warn("could not compute grid depths.")
pass
def set_thickness(self):
"""
Compute the thickness of each cell for the model grid.
Parameters
----------
None
Returns
-------
None : sets thick attributes in grid
"""
if "n" not in self.__dict__:
self.set_dims()
if self.n == 1:
return
try:
if self._isroms:
s_w, cs_w = seapy.roms.stretching(
self.vstretching, self.theta_s, self.theta_b, self.hc,
self.n, w_grid=True)
self.thick_rho = seapy.roms.thickness(
self.vtransform, self.h, self.hc, s_w, cs_w)
self.thick_u = seapy.model.rho2u(self.thick_rho)
self.thick_v = seapy.model.rho2v(self.thick_rho)
else:
d = np.abs(self.z.copy())
w = d * 0
# Check which way the depths are going
if d[0] < d[-1]:
w[0] = d[0]
w[1:] = d[1:] - d[0:-1]
else:
w[-1] = d[-1]
w[0:-1] = d[0:-1] - d[1:]
self.thick_rho = np.kron(np.kron(w,
np.ones(self.lon_rho.shape[1])),
np.ones(self.lon_rho.shape[0])).reshape(
[self.z.size, self.lon_rho.shape[0],
self.lon_rho.shape[1]])
if self.cgrid:
self.thick_u = seapy.model.rho2u(self.thick_rho)
self.thick_v = seapy.model.rho2v(self.thick_rho)
else:
self.thick_u = self.thick_rho
self.thick_v = self.thick_rho
except AttributeError:
warn("could not compute grid thicknesses.")
pass
def plot_trace(self, basemap=None, **kwargs):
"""
Trace the boundary of the grid onto a map projection
Parameters
----------
basemap: basemap instance
The basemap instance to use for drawing
**kwargs: optional
Arguments to pass to the plot routine
Returns
-------
None
"""
lon = np.concatenate([self.lon_rho[0, :], self.lon_rho[:, -1],
self.lon_rho[-1, ::-1], self.lon_rho[::-1, 0]])
lat = np.concatenate([self.lat_rho[0, :], self.lat_rho[:, -1],
self.lat_rho[-1, ::-1], self.lat_rho[::-1, 0]])
if basemap:
x, y = basemap(lon, lat)
basemap.plot(x, y, **kwargs)
else:
from matplotlib import pyplot
pyplot.plot(lon, lat, **kwargs)
def plot_depths(self, row=None, col=None, ax=None):
"""
Plot the depths of a model grid along a row or column transect.
If the bathymetry is known, it is plotted also.
Parameters
----------
row : int, optional
The row number to plot
col : int, optional
The column number to plot
ax : matplotlib.axes, optional
The axes to use for the figure
Returns
-------
ax : matplotlib.axes
The axes containing the plot
"""
import matplotlib.pyplot as plt
# Create the axes if we don't have any
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.set_bg_color('darkseagreen')
# Get the data
        if row is not None:
sz = np.s_[:, row, :]
s = np.s_[row, :]
x = self.lon_rho[s]
label = "Longitude"
        elif col is not None:
sz = np.s_[:, :, col]
s = np.s_[:, col]
x = self.lat_rho[s]
label = "Latitude"
else:
warn("You must specify a row or column")
return
# If it is ROMS, we should plot the top and bottom of the cells
if self._isroms:
sr, csr = seapy.roms.stretching(
self.vstretching, self.theta_s, self.theta_b,
self.hc, self.n, w_grid=True)
dep = np.ma.masked_where(seapy.adddim(self.mask_rho[s],
self.n + 1) == 0,
seapy.roms.depth(self.vtransform,
self.h[s], self.hc,
sr, csr,
w_grid=True))
else:
dep = np.ma.masked_where(seapy.adddim(self.mask_rho[s],
self.n) == 0,
self.depth_rho[sz])
h = -self.h[s]
# Begin with the bathymetric data
ax.fill_between(x, h, np.min(h), facecolor="darkseagreen",
interpolate=True)
# Plot the layers
ax.plot(x, dep.T, color="grey")
# Labels
ax.set_xlabel(label + " [deg]")
ax.set_ylabel("Depth [m]")
# Make it tight
plt.autoscale(ax, tight=True)
return ax
def to_netcdf(self, nc):
"""
Write all available grid information into the records present in the
netcdf file. This is used to pre-fill boundary, initial, etc. files
that require some of the grid information.
Parameters
----------
nc : netCDF4
File to fill all known records from the grid information
Returns
-------
None
"""
for var in nc.variables:
if hasattr(self, var.lower()):
nc.variables[var][:] = getattr(self, var.lower())
def nearest(self, lon, lat, grid="rho"):
"""
Find the indices nearest to each point in the given list of
longitudes and latitudes.
Parameters
----------
lon : ndarray,
longitude of points to find
lat : ndarray
latitude of points to find
grid : string, optional,
"rho", "u", or "v" grid to search
Returns
-------
indices : tuple of ndarray
The indices for each dimension of the grid that are closest
to the lon/lat points specified
"""
glat = getattr(self, "lat_" + grid)
glon = getattr(self, "lon_" + grid)
xy = np.dstack([glat.ravel(), glon.ravel()])[0]
pts = np.dstack([np.atleast_1d(lat), np.atleast_1d(lon)])[0]
grid_tree = scipy.spatial.cKDTree(xy)
dist, idx = grid_tree.query(pts)
return np.unravel_index(idx, glat.shape)
def ij(self, points):
"""
Compute the fractional i,j indices of the grid from a
set of lon, lat points.
Parameters
----------
points : list of tuples
longitude, latitude points to compute i, j indicies
Returns
-------
out : tuple of numpy masked array (with netcdf-type indexing),
list of j,i indices for the given lon, lat points. NOTE: values
that lie on the mask_rho are masked; however, if you wish to
ignore masking, you can use the data field (i.data) directly.
Values that do not lie within the grid are masked and stored as
np.nan.
Examples
--------
>>> a = ([-158, -160.5, -155.5], [20, 22.443, 19.5])
>>> idx = g.ij(a)
"""
from seapy.external.hindices import hindices
# Interpolate the lat/lons onto the I, J
xgrid, ygrid = np.ma.masked_equal(hindices(self.angle.T,
self.lon_rho.T, self.lat_rho.T,
points[0], points[1]), -999.0)
mask = self.mask_rho[(ygrid.filled(0).astype(int),
xgrid.filled(0).astype(int))]
xgrid[mask == 0] = np.ma.masked
ygrid[mask == 0] = np.ma.masked
return (ygrid, xgrid)
def ijk(self, points, depth_adjust=False):
"""
Compute the fractional i, j, k indices of the grid from a
set of lon, lat, depth points.
Parameters
----------
points : list of tuples,
longitude, latitude, depth points to compute i, j, k indicies.
NOTE: depth is in meters (defaults to negative)
depth_adjust : bool,
If True, depths that are deeper (shallower) than the grid are set
to the bottom (top) layer, 0 (N). If False, a nan value is used for
values beyond the grid depth. Default is False.
Returns
-------
out : tuple of numpy.maskedarray (with netcdf-type indexing),
list of k, j, i indices for the given lon, lat, depth points
Examples
--------
        >>> a = ([-158, -160.5, -155.5], [20, 22.443, 19.5], [-10, -200, 0])
>>> idx = g.ijk(a)
"""
# NOTE: Attempted to use a 3D griddata, but it took over 2 minutes
# for each call, resulting in a 6minute runtime for this method
# Reverted to 2D i,j indices, then looping a 1-D interpolation
# to get depths for increased-speed (though this method is still slow)
from scipy.interpolate import interp1d
# Get the i,j points
(j, i) = self.ij((points[0], points[1]))
k = j * np.ma.masked
grid_k = np.arange(0, self.n)
depth = np.asanyarray(points[2])
depth[depth > 0] *= -1
# Determine the unique points
good = np.where(~np.logical_or(i.mask, j.mask))[0]
ii = np.floor(i[good]).astype(int)
jj = np.floor(j[good]).astype(int)
idx = seapy.unique_rows((jj, ii))
fill_value = 0 if depth_adjust else np.nan
for n in idx:
pts = np.where(np.logical_and(jj == jj[n], ii == ii[n]))
griddep = self.depth_rho[:, jj[n], ii[n]]
if griddep[0] < griddep[-1]:
griddep[-1] = 0.0
else:
griddep[0] = 0.0
fi = interp1d(griddep, grid_k, bounds_error=False,
fill_value=fill_value)
k[good[pts]] = fi(depth[good][pts])
# Mask bad points
l = np.isnan(k.data)
i[l] = np.ma.masked
j[l] = np.ma.masked
k[l] = np.ma.masked
return (k, j, i)
def latlon(self, indices):
"""
Compute the latitude and longitude from the given (i,j) indices
of the grid
Parameters
----------
indices : list of tuples
i, j points to compute latitude and longitude
Returns
-------
out : tuple of ndarray
list of lat,lon points from the given i,j indices
Examples
--------
>>> a = [(23.4, 16.5), (3.66, 22.43)]
>>> idx = g.latlon(a)
"""
from scipy.interpolate import RegularGridInterpolator
lati = RegularGridInterpolator((self.I[0, :], self.J[:, 0]),
self.lat_rho.T)
loni = RegularGridInterpolator((self.I[0, :], self.J[:, 0]),
self.lon_rho.T)
return (lati(indices), loni(indices))
def rfactor(self):
"""
Return the 2D field of the r-factor for the given grid.
Parameters
----------
None
Returns
-------
ndarray:
array of r-factors size of the grid
"""
hx = np.zeros(self.shape)
hy = hx.copy()
r = hx.copy()
hx[:, :-1] = np.abs(np.diff(self.h, axis=1) /
(self.h[:, 1:] + self.h[:, :-1]))
hy[:-1, :] = np.abs(np.diff(self.h, axis=0) /
(self.h[1:, :] + self.h[:-1, :]))
hx[:, :-1] *= self.mask_u
hy[:-1, :] *= self.mask_v
r[:-1, :-1] = np.maximum(np.maximum(hx[:-1, :-1], hx[:-1, 1:]),
np.maximum(hy[:-1, :-1], hy[1:, :-1]))
r[:, -1] = r[:, -2]
r[-1, :] = r[-2, :]
hx = hy = 0
return r * self.mask_rho
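    # Note (added for clarity): the quantity computed above is the standard
    # bathymetric slope parameter r = |h1 - h2| / (h1 + h2), evaluated across
    # adjacent wet cells in xi and eta and maximized over the faces
    # surrounding each rho-point (commonly referred to as rx0 in ROMS work).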
def dHdxy(self):
"""
Calculate the spatial derivative of water depth in each direction
(xi and eta).
Parameters
----------
None
Returns
-------
dHdxi : ndarray,
Slope in x-direction
dHdeta : ndarray,
Slope in eta-direction
"""
dHdxi = np.zeros(self.h.shape)
dHdeta = np.zeros(self.h.shape)
dHdxi[:, :-1] = -np.diff(self.h, axis=1) * self.pm[:, 1:]
dHdxi[:, -1] = dHdxi[:, -2]
dHdeta[:-1, :] = -np.diff(self.h, axis=0) * self.pn[1:, :]
dHdeta[-1, :] = dHdeta[-2, :]
return dHdxi, dHdeta
def mask_poly(self, vertices, lat_lon=False, radius=0.0):
"""
Create an np.masked_array of the same shape as the grid with values
masked if they are not within the given polygon vertices
Parameters
----------
vertices: list of tuples,
points that define the vertices of the polygon
lat_lon : bool, optional,
If True, the vertices are a list of lon, lat points rather
than indexes
Returns
-------
mask : np.masked_array
mask of values that are located within the polygon
Examples
--------
>>> vertices = [ (1,2), (4,5), (1,3) ]
>>> mask = grid.mask_poly(vertices)
"""
# If lat/lon vertices are given, we need to put these onto
# the grid coordinates
if lat_lon:
            points = self.ij(vertices)
vertices = list(zip(points[0], points[1]))
# Now, with grid coordinates, test the grid against the vertices
poly = matplotlib.path.Path(vertices)
inside = poly.contains_points(np.vstack((self.J.flatten(),
self.I.flatten())).T,
radius=radius)
return np.ma.masked_where(inside.reshape(self.lat_rho.shape),
np.ones(self.lat_rho.shape))
| mit |
suiyuan2009/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 52 | 69800 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
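    # The expected value is a sketch that assumes the model has converged to
    # the base-rate prediction p = 0.25 for every row (all rows share the same
    # features and one label in four is positive):
    #   -0.25 * log(0.25) - 0.75 * log(0.75) ~= 0.3466 + 0.2158 ~= 0.5623
    # The delta of 0.1 in the assertion absorbs incomplete convergence.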
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
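    # Sketch of the value above, assuming training (with equal weights) still
    # converges to p = 0.25 and only the evaluation weights [7, 1, 1, 1] differ:
    #   (-7 * log(0.25) - 3 * log(0.75)) / 10 ~= (9.704 + 0.863) / 10 ~= 1.057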
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
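    # With identical features for all four rows and a single positive label,
    # the classifier should converge towards the base rate P(y=1) = 1/4, so
    # each row's probabilities are roughly [0.75, 0.25]; the 0.05 tolerance
    # below allows for incomplete convergence after 100 steps.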
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
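    # i.e. the prior log-odds of the positive class: three of the four training
    # labels are positive, so log(0.75 / 0.25) = log(3) ~= 1.0986. With constant
    # features the centered-bias weight is expected to converge towards this
    # value (a sketch, assuming 1000 steps suffice for convergence).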
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
self.assertEqual(50, step_counter.steps)
else:
# Occasionally, training stops when global_step == 101, due to a race
# condition.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
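    # Sketch of the constant above, assuming convergence to the mean label 0.25:
    #   ((1 - 0.25)**2 + 3 * (0 - 0.25)**2) / 4 = (0.5625 + 0.1875) / 4 = 0.1875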
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
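    # Sketch, again assuming the fit from the equally-weighted training data is
    # y_hat = 0.25 and only the evaluation weights [7, 1, 1, 1] differ:
    #   (7 * 0.75**2 + 3 * 0.25**2) / 10 = (3.9375 + 0.1875) / 10 = 0.4125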
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
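    # Rough bound, assuming the regressor converges to the weighted mean of the
    # training labels, 100 / 107 ~= 0.935: evaluating against all-ones labels
    # then gives a squared error of about (1 - 0.935)**2 ~= 0.004, well below 0.2.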
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ch3ll0v3k/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
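# Added note (not in the original test): with smooth_idf=True the idf is
# assumed to be computed as idf(t) = ln((1 + n_samples) / (1 + df(t))) + 1, so
# a feature that appears in no document still gets a finite idf and no division
# by zero occurs, which is why the all-zeros third feature above is handled
# gracefully.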
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
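# Added worked check (not in the original test): sublinear_tf is assumed to
# replace each raw term frequency tf with 1 + ln(tf), so the counts 1, 2 and 3
# above map to 1.0, ~1.69 and ~2.10, which is consistent with the ordering and
# upper-bound assertions in test_sublinear_tf.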
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy, so
    # the tie is broken in favor of the unigram representation, as the
    # assertions below check
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset all candidate models converge to 100% accuracy, so
    # the tie is broken in favor of the unigram representation, as the
    # assertions below check
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/mantidfigurecanvas.py | 3 | 2785 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
"""
Qt-based matplotlib canvas
"""
from qtpy.QtCore import Qt
from qtpy.QtGui import QPen
from matplotlib.backends.backend_qt5agg import ( # noqa: F401
FigureCanvasQTAgg, draw_if_interactive, show)
from mantid.plots.mantidimage import MantidImage, ImageIntensity
class MantidFigureCanvas(FigureCanvasQTAgg):
def __init__(self, figure):
super().__init__(figure=figure)
self._pen_color = Qt.black
self._pen_thickness = 1.5
    # options controlling the pen used by tools that manipulate the graph - e.g. the zoom box
@property
def pen_color(self):
return self._pen_color
@pen_color.setter
def pen_color(self, color):
self._pen_color = color
@property
def pen_thickness(self):
return self._pen_thickness
@pen_thickness.setter
def pen_thickness(self, thickness):
self._pen_thickness = thickness
# Method used by the zoom box tool on the matplotlib toolbar
def drawRectangle(self, rect):
self.update_pen_color()
# Draw the zoom rectangle to the QPainter. _draw_rect_callback needs
# to be called at the end of paintEvent.
if rect is not None:
def _draw_rect_callback(painter):
pen = QPen(self.pen_color, self.pen_thickness / self._dpi_ratio, Qt.DotLine)
painter.setPen(pen)
painter.drawRect(*(pt / self._dpi_ratio for pt in rect))
else:
def _draw_rect_callback(painter):
return
self._draw_rect_callback = _draw_rect_callback
self.update()
def update_pen_color(self):
"""Update the pen color used to draw tool in the matplotlib toolbar, e.g
the zoombox. The color is automatically determined
by considering how dark, or light the image is and setting a pen appropriately.
Only works if the figure contains a MantidImage.
"""
for ax in self.figure.get_axes():
for img in ax.get_images():
if (not isinstance(img, MantidImage)):
continue
intensity = img.calculate_greyscale_intensity()
if intensity == ImageIntensity.DARK:
color = Qt.white
else:
color = Qt.black
self.pen_color = color
# break after we find the first MantidImage
break
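# Minimal usage sketch (added note, not part of the original module): assuming
# a QApplication and a Matplotlib Figure are available, the canvas can be
# constructed directly and the pen used by toolbar tools (e.g. the zoom box)
# adjusted through the properties above.
#
#     from matplotlib.figure import Figure
#     canvas = MantidFigureCanvas(Figure())
#     canvas.pen_thickness = 2.0   # thicker zoom-box outline
#     canvas.pen_color = Qt.red    # may be overridden by update_pen_color()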
| gpl-3.0 |
dhuppenkothen/atrytone | code/display.py | 2 | 2400 | import numpy as np
import matplotlib.pyplot as plt
import os
import argparse
#data = loadtxt('../data/test_data3.dat')
#posterior_sample = atleast_2d(loadtxt('posterior_sample.txt'))
def display_samples(filename, posterior_dir, save_frames=True):
"""
Utility function for displaying samples from the posterior along
with the data.
Parameters
----------
filename: str
full path of the file containing the data
posterior_dir: str
The directory containing the posterior_samples.txt file
save_frames: bool
Flag determining whether to save the frames being plotted
to files. Default: True
"""
data = np.loadtxt(filename)
posterior_sample = np.atleast_2d(np.loadtxt(posterior_dir+"posterior_sample.txt"))
if save_frames:
os.system('rm '+ posterior_dir + 'Frames/*.png')
for i in range(0, posterior_sample.shape[0]):
plt.hold(False)
plt.plot(data[:,0], data[:,2], linestyle="steps-mid")
plt.hold(True)
plt.plot(data[:,0], posterior_sample[i, -data.shape[0]:], 'r')
plt.xlabel('Wavelength [Angstrom]', fontsize=16)
plt.ylabel('Flux', fontsize=16)
if save_frames:
plt.savefig(posterior_dir+'Frames/' + '%0.6d'%(i+1) + '.png', bbox_inches='tight')
print(posterior_dir+'Frames/' + '%0.6d'%(i+1) + '.png')
return
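# Minimal programmatic usage sketch (added note; the data file path is the
# hypothetical one from the commented-out example above, and the posterior
# directory must contain posterior_sample.txt):
#
#     display_samples('../data/test_data3.dat', './', save_frames=False)
#
# The argparse block below exposes the same functionality on the command line.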
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description="Display data and posterior samples.")
parser.add_argument('-f', '--filename', action="store", dest="filename",
required=True, help="Filename of the data file to be plotted.")
parser.add_argument('-p', '--posterior_dir', action="store", dest="posterior_dir", required=False, default="./",
help="Directory with the posterior_samples.txt file.")
parser.add_argument('-s', '--save', action="store", dest="save_frames", type=bool, required=False, default=False,
help="Optional boolean flag determining whether to save each sample plot to disk.")
clargs = parser.parse_args()
filename = clargs.filename
posterior_dir = clargs.posterior_dir
save_frames = clargs.save_frames
display_samples(filename, posterior_dir, save_frames)
| gpl-3.0 |
kazemakase/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 7 | 26152 | """
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
        aux_trans : a transform from the source (curved) coordinate to the
        target (rectilinear) coordinate. Either an instance of MPL's Transform
        (its inverse transform must be defined) or a tuple of two callable
        objects that define the transform and its inverse. The callables
        need to take two arrays of source coordinates and
        should return two arrays of target coordinates:
        e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import (host_subplot_class_factory,
                                                       ParasiteAxesAuxTrans)
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # polar projection, which involves a cycle and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but this may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
    # let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
    # let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # polar projection, which involves a cycle and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but this may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| mit |