repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
mkness/TheCannon
|
code/makeplots_talks/makeplot_fits_self_cluster_1.py
|
1
|
5646
|
#!/usr/bin/python
import scipy
import numpy
import pickle
from numpy import *
from scipy import ndimage
from scipy import interpolate
from numpy import loadtxt
import os
import numpy as np
import matplotlib
from pylab import rcParams
from pylab import *
from matplotlib import pyplot
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.pyplot import axes
from matplotlib.pyplot import colorbar
#from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
from matplotlib import rc
rc('text', usetex=False)
rc('font', family='serif')
def plotfits():
file_in = "self_tags.pickle"
file_in2 = open(file_in, 'r')
params, icovs_params = pickle.load(file_in2)
params = array(params)
file_in2.close()
filein2 = 'starsin_test2.txt'  # alternative self-test input (overridden below)
filein2 = 'starsin_new_all_ordered.txt'  # alternative self-test input (overridden below)
filein2 = 'test4_selfg.txt'  # self-test input: original ASPCAP values / metadata; caution - the same logg cut should be applied here
a = open(filein2)
al = a.readlines()
names = []
for each in al:
names.append(each.split()[1])
unames = unique(names)
starind = arange(0,len(names), 1)
name_ind = []
names = array(names)
for each in unames:
takeit = each == names
name_ind.append(int(starind[takeit][-1] + 1.))
cluster_ind = [0] + list(sort(name_ind))# + [len(al)]
plot_markers = ['ko', 'yo', 'ro', 'bo', 'co','k*', 'y*', 'r*', 'b*', 'c*', 'ks', 'rs', 'bs', 'cs', 'rd', 'kd', 'bd', 'cd', 'mo', 'ms' ]
#plot_markers = ['k', 'y', 'r', 'b', 'c','k', 'y', 'r', 'b', 'c', 'k', 'r', 'b', 'c', 'r', 'k', 'b', 'c', 'm', 'm' ]
#cv_ind = np.arange(395,469,1)
#a = open(filein2)
#al = a.readlines()
#bl = []
#for each in al:
# bl.append(each.strip())
#bl = np.delete(bl, [cv_ind], axis = 0)
#savetxt("starsin_cut.txt", bl, fmt = "%s")
#filein3 = 'starsin_cut.txt'
t,g,feh,t_err,feh_err = loadtxt(filein2, usecols = (4,6,8,16,17), unpack =1)
g_err = [0]*len(g)
g_err = array(g_err)
params = array(params)
covs_params = np.linalg.inv(icovs_params)
rcParams['figure.figsize'] = 12.0, 10.0
fig, temp = pyplot.subplots(3,1, sharex=False, sharey=False)
ax1 = temp[0]
ax2 = temp[1]
ax3 = temp[2]
params_labels = [params[:,0], params[:,1], params[:,2] , covs_params[:,0,0]**0.5, covs_params[:,1,1]**0.5, covs_params[:,2,2]**0.5 ]
cval = ['k', 'b', 'r']
input_ASPCAP = [t, g, feh, t_err, g_err, feh_err]
listit_1 = [0,1,2]
listit_2 = [1,0,0]
axs = [ax1,ax2,ax3]
labels = ["ASPCAP log g", "ASPCAP Teff", "ASPCAP Teff"]
for i in range(0,len(cluster_ind)-1):
indc1 = cluster_ind[i]
indc2 = cluster_ind[i+1]
for ax, num,num2,label1,x1,y1 in zip(axs, listit_1,listit_2,labels, [4800,3.0,0.3], [3400,1,-1.5]):
pick = logical_and(g[indc1:indc2] > 0, logical_and(t_err[indc1:indc2] < 300, feh[indc1:indc2] > -4.0) )
cind = array(input_ASPCAP[1][indc1:indc2][pick])
cind = array(input_ASPCAP[num2][indc1:indc2][pick]).flatten()
ax.plot(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick], plot_markers[i])
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],yerr= params_labels[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],xerr=input_ASPCAP[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
ax.text(x1,y1,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[num+3][pick]),2)),fontsize = 14)
ax1.plot([0,6000], [0,6000], linewidth = 1.5, color = 'k' )
ax2.plot([0,5], [0,5], linewidth = 1.5, color = 'k' )
ax3.plot([-3,2], [-3,2], linewidth = 1.5, color = 'k' )
ax1.set_xlim(3500, 5500)
ax2.set_xlim(0, 5)
ax3.set_xlim(-3, 2)
ax1.set_xlabel("ASPCAP Teff, [K]", fontsize = 14,labelpad = 5)
ax1.set_ylabel("NHR+ Teff, [K]", fontsize = 14,labelpad = 5)
ax2.set_xlabel("ASPCAP logg, [dex]", fontsize = 14,labelpad = 5)
ax2.set_ylabel("NHR+ logg, [dex]", fontsize = 14,labelpad = 5)
ax3.set_xlabel("ASPCAP [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax3.set_ylabel("NHR+ [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax1.set_ylim(1000,6000)
ax1.set_ylim(3000,5500)
ax2.set_ylim(-3,6)
ax3.set_ylim(-3,2)
# attach lines to plots
fig.subplots_adjust(hspace=0.22)
#prefix = "/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_3_self_cut"
## prefix = "/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/test_self"
#savefig(fig, prefix, transparent=False, bbox_inches='tight', pad_inches=0.5)
return
def savefig(fig, prefix, **kwargs):
for suffix in (".eps", ".png"):
print "writing %s" % (prefix + suffix)
fig.savefig(prefix + suffix, **kwargs)
if __name__ == "__main__": #args in command line
wl1,wl2,wl3,wl4,wl5,wl6 = 15392, 15697, 15958.8, 16208.6, 16120.4, 16169.5
plotfits()
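# Illustrative sketch (not part of the original script): how the per-label
# uncertainties plotted above are obtained from the pickled inverse covariance
# matrices. Shapes of (n_stars, 3) for params and (n_stars, 3, 3) for
# icovs_params are assumed, matching the indexing used in plotfits().
def _label_uncertainties(icovs_params):  # hypothetical helper, for illustration only
    covs = np.linalg.inv(icovs_params)  # invert each 3x3 block (Teff, logg, [Fe/H])
    # 1-sigma uncertainty per label = sqrt of the diagonal of each covariance block
    return np.array([covs[:, 0, 0], covs[:, 1, 1], covs[:, 2, 2]]).T ** 0.5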
|
mit
|
zorojean/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
83
|
34544
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
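# Illustrative sketch: what l1_cross_distances returns for a tiny input. The
# helper below is hypothetical and only demonstrates the documented D/ij layout.
def _l1_cross_distances_demo():
    X_demo = np.array([[0.], [1.], [3.]])
    D_demo, ij_demo = l1_cross_distances(X_demo)
    # D_demo  -> [[1.], [3.], [2.]]        (|0-1|, |0-3|, |1-3|)
    # ij_demo -> [[0, 1], [0, 2], [1, 2]]  (row pairs matching each distance)
    return D_demo, ij_demo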
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and scaled with respect to
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
The 'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists of iterating over several one-dimensional optimizations
instead of running a single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
MATLAB toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard.
DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
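# Illustrative usage sketch, extending the docstring example above with an MSE
# evaluation at new points (eval_MSE=True returns the BLUP and its mean squared error).
if __name__ == "__main__":
    X = np.array([[1., 3., 5., 6., 7., 8.]]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp.fit(X, y)
    X_new = np.array([[2.], [4.]])
    y_pred, mse = gp.predict(X_new, eval_MSE=True)
    print(y_pred, np.sqrt(mse))  # prediction and its 1-sigma uncertainty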
|
bsd-3-clause
|
majkelx/astwro
|
astwro/coord/plots.py
|
1
|
1517
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import astropy.io.fits as fits
import astropy.visualization as vis
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from .coord_tools import fix_scamp_wcs
import astwro.sampledata
__metaclass__ = type
def plot_coords(fig=None, ax=None, fits_file=astwro.sampledata.fits_image(), gal=True, img_alpha=1.0, cmap=None,
grid_c='gray', grid_alpha=0.5, grid_ls='solid', keep_scale=True, img=True,
):
if ax is None or isinstance(ax, int):
if fig is None:
fig = plt.figure(figsize=(6, 6))
fig.tight_layout()
if ax is None: ax = 111
hdu = fits.open(fits_file)[0]
fix_scamp_wcs(hdu)
wcs = WCS(hdu.header)
ax = fig.add_subplot(ax, projection=wcs)
if img:
vmin, vmax = 120, 90
norm = vis.ImageNormalize(vmin=vmin, vmax=vmax, stretch=vis.SqrtStretch())
ax.imshow(hdu.data, origin='lower', norm=norm, alpha=img_alpha, cmap=cmap)
ax.coords.grid(True, color=grid_c, ls=grid_ls, alpha=grid_alpha)
ax.coords[0].set_axislabel('ra')
ax.coords[1].set_axislabel('dec')
ax.coords['ra'].set_major_formatter('hh:mm:ss')
if keep_scale:
ax.set_autoscale_on(False)
if gal:
overlay = ax.get_coords_overlay('galactic')
overlay.grid(color=grid_c, ls=grid_ls, alpha=grid_alpha)
overlay[0].set_axislabel('$l$')
overlay[1].set_axislabel('$b$')
return ax
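# Illustrative usage sketch: plotting the bundled sample image with the galactic
# grid overlay. The commented second call shows re-using the same figure for
# another FITS file; 'my_field.fits' is a placeholder path, not a real file.
if __name__ == '__main__':
    ax = plot_coords(gal=True, cmap='gray')
    # plot_coords(fig=ax.figure, ax=212, fits_file='my_field.fits')  # hypothetical second panel
    plt.show()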
|
mit
|
christinahedges/PyKE
|
pyke/kepperiodogram.py
|
2
|
8326
|
from .utils import PyKEArgumentHelpFormatter
import math
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits as pyfits
from . import kepio, kepmsg, kepkey, kepstat
from astropy.stats import LombScargle
__all__ = ['kepperiodogram']
def kepperiodogram(infile, outfile=None, datacol='PDCSAP_FLUX', pmin=0.1, pmax=10., nfreq=2000,
plot=False, noninteractive=False, overwrite=False, verbose=False,
logfile='kepperiodogram.log'):
"""
kepperiodogram -- Calculate and store a Lomb Scargle Periodogram based on a
Kepler time series. The result is stored in a new FITS file that is a
direct copy of the input file but with an additional table extension
containing the periodogram.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file with a new extension containing the
Fourier spectrum.
datacol : str
The name of the FITS table column in extension 1 of infile upon which
the Fourier transform will be calculated.
pmin : float [day]
The minimum of the period range over which the Fourier transform will
be calculated.
pmax : float [day]
The maximum of the period range over which the Fourier transform will
be calculated.
nfreq : int
The number of uniform frequency steps between :math:`1/pmax` and
:math:`1/pmin` at which the Fourier transform will be calculated.
plot : bool
Plot the output Fourier spectrum?
noninteractive : bool
If True, prevents the matplotlib window from popping up.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepperiodogram kplr002436324-2009259160929_llc.fits --pmin 0.5
--pmax 100 --nfreq 1000 --plot --verbose
.. image:: ../_static/images/api/kepperiodogram.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
## log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('kepperiodogram -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' pmin={}'.format(pmin)
+ ' pmax={}'.format(pmax)
+ ' nfreq={}'.format(nfreq)
+ ' plot={}'.format(plot)
+ ' noninteractive={}'.format(noninteractive)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
## start time
kepmsg.clock('Start time is', logfile, verbose)
## overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = 'ERROR -- kepperiodogram: {} exists. Use --overwrite'.format(outfile)
kepmsg.err(logfile, errmsg, verbose)
## open input file
instr = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
## fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
## read table columns
try:
barytime = instr[1].data.field('barytime') + bjdref
except:
barytime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose) + bjdref
signal = kepio.readfitscol(infile, instr[1].data, datacol, logfile, verbose)
## remove infinite data from time series
incols = [barytime, signal]
outcols = kepstat.removeinfinlc(signal, incols)
barytime = outcols[0]
signal = outcols[1] - np.median(outcols[1])
## period to frequency conversion
fmin = 1.0 / pmax
fmax = 1.0 / pmin
deltaf = (fmax - fmin) / nfreq
## loop through frequency steps; determine FT power
fr = np.linspace(fmin,fmax,nfreq)
power = LombScargle(barytime, signal, deltaf).power(fr)
#find highest power period
period = 1. / fr[power.argmax()]
## write output file
col1 = pyfits.Column(name='FREQUENCY', format='E', unit='1/day',
array=fr)
col2 = pyfits.Column(name='POWER', format='E', array=power)
cols = pyfits.ColDefs([col1, col2])
instr.append(pyfits.BinTableHDU.from_columns(cols))
instr[-1].header['EXTNAME'] = ('POWER SPECTRUM', 'extension name')
instr[-1].header['PERIOD'] = (period, 'most significant trial period [d]')
kepmsg.log(logfile, "kepperiodogram - best period found: {}".format(period), verbose)
kepmsg.log(logfile, "Writing output file {}...".format(outfile), verbose)
instr.writeto(outfile)
## history keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
## close input file
instr.close()
## data limits
nrm = int(math.log10(power.max()))
power = power / 10 ** nrm
ylab = 'Power (x10$^{}$)'.format(nrm)
xmin = fr.min()
xmax = fr.max()
ymin = power.min()
ymax = power.max()
xr = xmax - xmin
yr = ymax - ymin
fr = np.insert(fr, [0], fr[0])
fr = np.append(fr, fr[-1])
power = np.insert(power, [0], 0.0)
power = np.append(power, 0.0)
if plot:
plt.figure()
plt.clf()
plt.axes([0.06, 0.113, 0.93, 0.86])
plt.plot(fr, power, color='#0000ff', linestyle='-', linewidth=1.0)
plt.fill(fr, power, color='#ffff00', linewidth=0.0, alpha=0.2)
plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
if ymin - yr * 0.01 <= 0.0:
plt.ylim(1.0e-10, ymax + yr * 0.01)
else:
plt.ylim(ymin - yr * 0.01, ymax + yr *0.01)
plt.xlabel(r'Frequency (d$^{-1}$)', {'color' : 'k'})
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# render plot
if not noninteractive:
plt.show()
## end time
kepmsg.clock('kepperiodogram completed at', logfile, verbose)
def kepperiodogram_main():
import argparse
parser = argparse.ArgumentParser(
description=('Calculate and store a Fourier Transform from a'
' Kepler time series.'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-kepperiodogram.'),
default=None)
parser.add_argument('--datacol', default='PDCSAP_FLUX',
help='Name of data column to plot', type=str)
parser.add_argument('--pmin', default=0.1,
help='Minimum search period [days]', type=float)
parser.add_argument('--pmax', default=10.,
help='Maximum search period [days]', type=float)
parser.add_argument('--nfreq', default=2000,
help='Number of frequency intervals', type=int)
parser.add_argument('--plot', action='store_true', help='Plot result?')
parser.add_argument('--non-interactive', action='store_true',
help='Suppress the matplotlib plot window?',
dest='noninteractive')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='kepperiodogram.log', type=str)
args = parser.parse_args()
kepperiodogram(args.infile, args.outfile, args.datacol, args.pmin, args.pmax,
args.nfreq, args.plot, args.noninteractive, args.overwrite,
args.verbose, args.logfile)
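# Illustrative sketch: calling kepperiodogram directly from Python instead of
# through the CLI wrapper above. The light-curve file name is taken from the
# docstring example and is assumed to exist locally.
def _kepperiodogram_example():  # hypothetical helper, for illustration only
    kepperiodogram('kplr002436324-2009259160929_llc.fits', pmin=0.5, pmax=100.,
                   nfreq=1000, plot=True, overwrite=True, verbose=True)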
|
mit
|
courtarro/gnuradio
|
gnuradio-runtime/examples/volk_benchmark/volk_plot.py
|
78
|
6117
|
#!/usr/bin/env python
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary so
# we can reference the table by its name and the data associated
# with a given kernel in name_reg by its name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = xrange(len(name_reg))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/(M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80/M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
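# Illustrative sketch of the percent-improvement formula used above,
# -100*(y - n)/y: if the reference table ran a kernel in n = 2.0 s and this
# table ran it in y = 4.0 s, the result is -100*(4.0 - 2.0)/4.0 = -50.0,
# i.e. a negative improvement (the kernel is slower than the reference).
def _percent_improvement_demo(n=2.0, y=4.0):  # hypothetical helper, for illustration only
    return -100 * (y - n) / y  # -> -50.0 for the example values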
|
gpl-3.0
|
pylada/pylada-light
|
src/pylada/vasp/nlep/plotbs.py
|
1
|
6923
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
# simply process OUTCAR to do basic band structure diagram
import os
import sys
import optparse
import matplotlib
matplotlib.use('TkAgg')
#from pylab import plot, show
import matplotlib.pyplot as plt
from optparse import OptionParser
# from
# grep -B 1 -A 3 "^ band No" OUTCAR | awk '{if (NR%6==1) s=$0; else if
# (NR%6==4) printf "%s %s\n", s, $0}' > log2
# indices of kpoints to extract from gw data
gw_g2x = [0, 5, 12, 17, 20]
gw_g2l = [0, 1, 2, 3, 4]
gamma_idx = 0
class BandStructure:
def __init__(self, kg2x, g2x, kg2l, g2l):
self.kg2x = kg2x
self.kg2l = kg2l
self.g2x = g2x
self.g2l = g2l
def split1(fn, ax, col, skipln, gwk, first_vband_idx, my_vband_idx, bstart, bend, first_bs):
#fn = "OUTCAR"
f = open(fn)
ln = f.readline()
all_kpt = []
nband = -1
while (ln != ""):
stuff = ln.split()
# print stuff
if (len(stuff) > 1 and stuff[0] == "band" and stuff[1] == "No."):
if (skipln == True): # gw data has extra space after "band No."
f.readline()
ll = lastLn.split()
ib = 0
kpt_dat = []
ln = f.readline().split()
while (len(ln) > 0):
band = [int(ln[0]), float(ln[1]), float(ln[2])]
kpt_dat.append(band)
kpt = [int(ll[1]), float(ll[3]), float(ll[4]), float(ll[5]), kpt_dat]
ib += 1
ln = f.readline().split()
all_kpt.append(kpt)
if (nband > 0 and nband != ib):
print("problem: found different number bands at different k points")
nband = ib
lastLn = ln
ln = f.readline()
# for kpt in all_kpt:
# print kpt[0]-1, kpt[1], kpt[2], kpt[3]
# for b in kpt[4]: # which is actually a list of energies for each band
# print b
# print "%f %f %f %f %f" % (kpt[1], kpt[2], kpt[3], kpt[4][3][1], kpt[4][4][1])
kg2x = []
kg2l = []
g2x = []
g2l = []
if (gwk == True):
g2x_idx = gw_g2x
g2l_idx = gw_g2l
else:
g2x_idx = list(range(0, 10))
g2l_idx = list(range(10, 20))
for b in range(0, nband):
g2x.append([])
g2l.append([])
for i in g2x_idx:
kx = all_kpt[i]
kg2x.append(kx[1])
kxdat = kx[4]
for b in range(0, nband):
g2x[b].append(kxdat[b][1])
for i in g2l_idx:
kl = all_kpt[i]
kg2l.append(-kl[1])
kldat = kl[4]
for b in range(0, nband):
g2l[b].append(kldat[b][1])
# for k in kg2x:
# print k
# print
# for k in kg2l:
# print k
# print kg2l[i]
# for b in range(0,nband):
# print " ", g2x[b][0]
#from numpy import array
# gamma->X is first 10, gamma->L is 2nd 10
offset = 0
if (my_vband_idx is not None and first_bs is not None):
offset = first_bs.g2x[first_vband_idx][gamma_idx] - g2x[my_vband_idx][gamma_idx]
if (bstart == None):
bstart = 0
if (bend == None):
bend = nband
print(bstart, bend, offset)
for b in range(bstart, bend):
vals = [e + offset for e in g2x[b]]
ax.plot(kg2x, vals, color=col)
vals = [e + offset for e in g2l[b]]
ax.plot(kg2l, vals, color=col)
return BandStructure(kg2x, g2x, kg2l, g2l)
def real_main(argv, fig, ax, ifig=0):
if (len(argv) == 1):
print("proc1.py <[options] band structure file>*")
print("bs file is actual OUTCAR")
print("options: --notgwk. WITHOUT this, we assume kpoints of interest are as in Stephan Lany's GW data")
print("--notgwk will assume a run of only g2x (kpoints 0-9) and g2l (kpoint 10-19)")
print("--skipln parses OUTCAR to match GW data. necessary for plotting bs from fitting data")
print("--matchvbm=<n1><n2> says band n2 for this data file is same as band n1 for the first data file given")
print("--bstart=<n1> and --bend=<n2> plots only bands from n1 to n2. These options stay in effect until overridden")
print("eg:")
sys.exit()
# fig = plt.figure(ifig)
# ax = fig.add_subplot(1,1,1)
cols = ['k', 'r', 'b', 'g']
colidx = 0
gwk = True
skipln = False
first_bs = None
bstart = None
bend = None
my_vband_idx = None
first_vband_idx = None
for i in range(1, len(argv)):
fn = argv[i]
print(fn)
if (fn == "--notgwk"):
gwk = False
elif (fn == "--skipln"):
skipln = True
elif (fn[0:10] == "--matchvbm"):
# expecting "--matchvbm=<first vb idx>,<this vb idx>"
stuff = fn[11:len(fn)].split(",")
if (len(stuff) != 2):
print("cannot parse %s", fn)
my_vband_idx = int(stuff[1])
first_vband_idx = int(stuff[0])
elif (fn[0:8] == "--bstart"):
bstart = 1 + int(fn[9:len(fn)]) # one for "less than" indexing in "range()"
elif (fn[0:6] == "--bend"):
bend = int(fn[7:len(fn)])
else:
col = cols[colidx % (len(cols))]
bs = split1(fn, ax, col, skipln, gwk, first_vband_idx,
my_vband_idx, bstart, bend, first_bs)
if (first_bs is None):
first_bs = bs
colidx += 1
gwk = True
skipln = False
my_vband_idx = None
first_vband_idx = None
axis = ax.xaxis
axis.set_ticks([-0.5, 0, 0.5])
axis.set_ticklabels(["L", "G", "X"])
return fig
# return plt
def main():
fig = plt.figure(1)
ax = fig.add_subplot(1, 1, 1)
real_main(sys.argv, fig, ax, 1)
plt.show()
if __name__ == '__main__':
main()
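# Illustrative sketch: driving real_main() programmatically with the options
# documented in its usage text. The OUTCAR file names below are placeholders
# and are assumed to exist.
def _plotbs_example():  # hypothetical helper, for illustration only
    fig = plt.figure(2)
    ax = fig.add_subplot(1, 1, 1)
    argv = ['plotbs.py', '--bstart=1', '--bend=8', 'OUTCAR_gw',
            '--notgwk', '--matchvbm=3,3', 'OUTCAR_fit']
    real_main(argv, fig, ax, 2)
    plt.show()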
|
gpl-3.0
|
ngoix/OCRF
|
examples/decomposition/plot_pca_vs_lda.py
|
176
|
2027
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that accounts for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained by each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
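# Illustrative extra step: projecting a new, unseen measurement into both fitted
# 2D spaces with transform(). The sample values below are made up for illustration.
new_sample = [[5.9, 3.0, 4.2, 1.5]]  # sepal length, sepal width, petal length, petal width (cm)
print('PCA projection:', pca.transform(new_sample))
print('LDA projection:', lda.transform(new_sample))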
|
bsd-3-clause
|
MartinDelzant/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
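# Illustrative extra step: the uneven cluster sizes discussed in the docstring
# can be inspected directly; for the last linkage fitted above, a bincount of
# the labels shows how unbalanced the 10 clusters are.
print("cluster sizes (%s linkage):" % linkage, np.bincount(clustering.labels_))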
|
bsd-3-clause
|
amolkahat/pandas
|
pandas/tseries/offsets.py
|
1
|
81688
|
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
import functools
import operator
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.core.dtypes.generic import ABCPeriod
from pandas.core.tools.datetimes import to_datetime
import pandas.core.common as com
# import after tools, dateutil check
from dateutil.easter import easter
from pandas._libs import tslibs, Timestamp, OutOfBoundsDatetime, Timedelta
from pandas.util._decorators import cache_readonly
from pandas._libs.tslibs import (
ccalendar, conversion,
frequencies as libfrequencies)
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
as_datetime, _is_normalized,
_get_calendar, _to_dt64,
apply_index_wraps,
roll_yearday,
shift_month,
BaseOffset)
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd', 'CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'BusinessHour', 'CustomBusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset', 'CalendarDay']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to
# pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
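# Illustrative sketch: as_timestamp converts anything Timestamp can represent
# and silently passes through values outside the representable range
# (roughly years 1677-2262), as the comment above describes.
def _as_timestamp_demo():  # hypothetical helper, for illustration only
    in_range = as_timestamp(datetime(2017, 1, 1))  # -> Timestamp('2017-01-01 00:00:00')
    out_of_range = as_timestamp(date(9999, 1, 1))  # -> datetime.date(9999, 1, 1), unchanged
    return in_range, out_of_range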
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslibs.NaT:
return tslibs.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = conversion.localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = conversion.tz_convert_single(
result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = conversion.localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = tslibs.normalize_date(result)
if tz is not None and result.tzinfo is None:
result = conversion.localize_pydatetime(result, tz)
return result
return wrapper
# ---------------------------------------------------------------------
# DateOffset
class DateOffset(BaseOffset):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in. Use of the keyword n is discouraged -- you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
Parameters
----------
n : int, default 1
The number of time periods the offset represents.
normalize : bool, default False
Whether to round the result of a DateOffset addition down to the
previous midnight.
**kwds
Temporal parameters that add to or replace the offset value.
Parameters that **add** to the offset (like Timedelta):
- years
- months
- weeks
- days
- hours
- minutes
- seconds
- microseconds
- nanoseconds
Parameters that **replace** the offset value:
- year
- month
- day
- weekday
- hour
- minute
- second
- microsecond
- nanosecond
See Also
--------
dateutil.relativedelta.relativedelta
Examples
--------
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
>>> ts + DateOffset(months=3)
Timestamp('2017-04-01 09:10:11')
>>> ts = pd.Timestamp('2017-01-01 09:10:11')
>>> ts + DateOffset(month=3)
Timestamp('2017-03-01 09:10:11')
"""
_params = cache_readonly(BaseOffset._params.fget)
_use_relativedelta = False
_adjust_dst = False
_attributes = frozenset(['n', 'normalize'] +
list(liboffsets.relativedelta_kwds))
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
BaseOffset.__init__(self, n, normalize)
off, use_rd = liboffsets._determine_offset(kwds)
object.__setattr__(self, "_offset", off)
object.__setattr__(self, "_use_relativedelta", use_rd)
for key in kwds:
val = kwds[key]
object.__setattr__(self, key, val)
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = conversion.localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if type(self) is not DateOffset:
raise NotImplementedError("DateOffset subclass {name} "
"does not have a vectorized "
"implementation".format(
name=self.__class__.__name__))
kwds = self.kwds
relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours',
'minutes', 'seconds', 'microseconds'}
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(kwds).issubset(relativedelta_fast)):
months = ((kwds.get('years', 0) * 12 +
kwds.get('months', 0)) * self.n)
if months:
shifted = liboffsets.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + \
i.to_perioddelta('W')
timedelta_kwds = {k: v for k, v in kwds.items()
if k in ['days', 'hours', 'minutes',
'seconds', 'microseconds']}
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
kwd = set(kwds) - relativedelta_fast
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) {kwd} not able to be "
"applied vectorized".format(kwd=kwd))
def isAnchored(self):
# TODO: Does this make sense for the general case? It would help
# if there were a canonical docstring for what isAnchored means.
return (self.n == 1)
# TODO: Combine this with BusinessMixin version by defining a whitelisted
# set of attributes on each object rather than the existing behavior of
# iterating over internal ``__dict__``
def _repr_attrs(self):
exclude = {'n', 'inc', 'normalize'}
attrs = []
for attr in sorted(self.__dict__):
if attr.startswith('_') or attr == 'kwds':
continue
elif attr not in exclude:
value = getattr(self, attr)
attrs.append('{attr}={value}'.format(attr=attr, value=value))
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
@property
def name(self):
return self.rule_code
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@cache_readonly
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '{n}{code}'.format(n=self.n, code=code)
else:
fstr = code
try:
if self._offset:
fstr += self._offset_str()
except AttributeError:
# TODO: standardize `_offset` vs `offset` naming convention
pass
return fstr
def _offset_str(self):
return ''
@property
def nanos(self):
raise ValueError("{name} is a non-fixed frequency".format(name=self))
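# A hedged usage sketch of the rollback/rollforward semantics documented in
# the DateOffset docstring above, using the BDay alias of BusinessDay (both
# are defined later in this module). The helper name is illustrative only.
def _example_dateoffset_rolling():
    saturday = Timestamp('2018-01-06')
    # Not a weekday, so rollforward/rollback move to the surrounding weekdays.
    assert BDay().rollforward(saturday) == Timestamp('2018-01-08')  # Monday
    assert BDay().rollback(saturday) == Timestamp('2018-01-05')     # Friday
    # As noted above, adding a zero offset is the same as rolling forward.
    assert saturday + BDay(0) == BDay().rollforward(saturday)
    return saturday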
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix {suffix}".format(suffix=suffix))
return cls()
class _CustomMixin(object):
"""
Mixin for classes that define and validate calendar, holidays,
and weekdays attributes
"""
def __init__(self, weekmask, holidays, calendar):
calendar, holidays = _get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# Custom offset instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
object.__setattr__(self, "weekmask", weekmask)
object.__setattr__(self, "holidays", holidays)
object.__setattr__(self, "calendar", calendar)
class BusinessMixin(object):
""" Mixin to business types to provide related functions """
@property
def offset(self):
"""Alias for self._offset"""
# Alias for backward compat
return self._offset
def _repr_attrs(self):
if self.offset:
attrs = ['offset={offset!r}'.format(offset=self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'offset'])
def __init__(self, n=1, normalize=False, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
wday = other.weekday()
# avoid slowness below by operating on weeks first
weeks = n // 5
if n <= 0 and wday > 4:
# roll forward
n += 1
n -= 5 * weeks
# n is always >= 0 at this point
if n == 0 and wday > 4:
# roll back
days = 4 - wday
elif wday > 4:
# roll forward
days = (7 - wday) + (n - 1)
elif wday + n <= 4:
# shift by n days without leaving the current week
days = n
else:
# shift by n days plus 2 to get past the weekend
days = n + 2
result = other + timedelta(days=7 * weeks + days)
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
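# A minimal sketch of the weekend-skipping arithmetic implemented in
# BusinessDay.apply above; the helper name is illustrative only.
def _example_businessday_arithmetic():
    friday = Timestamp('2018-01-05')
    # One business day after a Friday is the following Monday.
    assert friday + BusinessDay(1) == Timestamp('2018-01-08')
    # Larger offsets are handled weeks-first, as in apply(): ten business
    # days from a Friday span exactly two calendar weeks.
    assert friday + BusinessDay(10) == Timestamp('2018-01-19')
    return friday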
class BusinessHourMixin(BusinessMixin):
def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
# must be validated here for the equality check
start = liboffsets._validate_business_time(start)
object.__setattr__(self, "start", start)
end = liboffsets._validate_business_time(end)
object.__setattr__(self, "end", end)
object.__setattr__(self, "_offset", offset)
@cache_readonly
def next_bday(self):
"""used for moving to next businessday"""
if self.n >= 0:
nb_offset = 1
else:
nb_offset = -1
if self._prefix.startswith('C'):
# CustomBusinessHour
return CustomBusinessDay(n=nb_offset,
weekmask=self.weekmask,
holidays=self.holidays,
calendar=self.calendar)
else:
return BusinessDay(n=nb_offset)
@cache_readonly
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _next_opening_time(self, other):
"""
If n is positive, return tomorrow's business day opening time.
Otherwise yesterday's business day's opening time.
Opening time always lies on a BusinessDay.
The closing time may not, because business hours can extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise yesterday's business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
@cache_readonly
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag:
# create dummy datetime to calculate businesshours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return (until - dtstart).total_seconds()
else:
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return (until - dtstart).total_seconds()
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec
if self.n >= 0:
dt = self._prev_opening_time(
dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(
dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
daytime = self._get_daytime_flag
businesshours = self._get_business_hours_by_sec
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
# midnight business hour may not be on a BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or
self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
# calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(
result - timedelta(seconds=1)) + bhdelta
return result
else:
# TODO: Figure out the end of this sentence
raise ApplyTypeError(
'Only know how to combine business hour with ')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid business hours can fall on a different BusinessDay around midnight;
# distinguish by the time elapsed since the previous opening time
businesshours = self._get_business_hours_by_sec
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid business hours can fall on a different BusinessDay around midnight;
# distinguish by the time elapsed since the previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = (dt - op).total_seconds()
if span <= businesshours:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHourMixin, self)._repr_attrs()
start = self.start.strftime('%H:%M')
end = self.end.strftime('%H:%M')
attrs = ['{prefix}={start}-{end}'.format(prefix=self._prefix,
start=start, end=end)]
out += ': ' + ', '.join(attrs)
return out
class BusinessHour(BusinessHourMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business hours
.. versionadded:: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
_attributes = frozenset(['n', 'normalize', 'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, start='09:00',
end='17:00', offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
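# A hedged sketch of the BusinessHour behaviour implemented above (default
# 09:00-17:00 window): additions that spill past the close resume at the
# next opening time. The helper name is illustrative only.
def _example_businesshour_rolling():
    late_friday = Timestamp('2018-01-05 16:30')
    # 30 minutes remain on Friday; the other 30 minutes carry into Monday.
    assert late_friday + BusinessHour() == Timestamp('2018-01-08 09:30')
    return late_friday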
class CustomBusinessDay(_CustomMixin, BusinessDay):
"""
DateOffset subclass representing possibly n custom business days,
excluding holidays
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_prefix = 'C'
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = _to_dt64(dt, 'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
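# A minimal sketch of holiday-aware arithmetic via numpy's busday_offset, as
# used in CustomBusinessDay.apply above. The holiday date is illustrative.
def _example_custombusinessday_holidays():
    july4 = CustomBusinessDay(holidays=['2018-07-04'])
    tuesday = Timestamp('2018-07-03')
    # Wednesday July 4th is excluded, so one custom business day later
    # lands on Thursday.
    assert tuesday + july4 == Timestamp('2018-07-05')
    return july4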
class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
SingleConstructorOffset):
"""
DateOffset subclass representing possibly n custom business hours
.. versionadded:: 0.18.1
"""
_prefix = 'CBH'
_anchor = 0
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar',
'start', 'end', 'offset'])
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None,
start='09:00', end='17:00', offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
# ---------------------------------------------------------------------
# Month-Based Offset Classes
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
__init__ = BaseOffset.__init__
@property
def name(self):
if self.isAnchored:
return self.rule_code
else:
month = ccalendar.MONTH_ALIASES[self.n]
return "{code}-{month}".format(code=self.rule_code,
month=month)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
@apply_wraps
def apply(self, other):
compare_day = self._get_offset_day(other)
n = liboffsets.roll_convention(other.day, self.n, compare_day)
return shift_month(other, n, self._day_opt)
@apply_index_wraps
def apply_index(self, i):
shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
return i._shallow_copy(shifted)
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
_prefix = 'M'
_day_opt = 'end'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
_prefix = 'MS'
_day_opt = 'start'
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
_prefix = 'BM'
_day_opt = 'business_end'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
_prefix = 'BMS'
_day_opt = 'business_start'
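# A hedged sketch of the month-anchored offsets defined above; the helper
# name is illustrative only.
def _example_month_offsets():
    mid_january = Timestamp('2018-01-15')
    # MonthEnd rolls forward to the end of the current month ...
    assert mid_january + MonthEnd() == Timestamp('2018-01-31')
    # ... while MonthBegin moves to the start of the next month.
    assert mid_january + MonthBegin() == Timestamp('2018-02-01')
    return mid_january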
class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
"""
DateOffset subclass representing one custom business month, incrementing
between [BEGIN/END] of month dates
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_attributes = frozenset(['n', 'normalize',
'weekmask', 'holidays', 'calendar', 'offset'])
onOffset = DateOffset.onOffset # override MonthOffset method
apply_index = DateOffset.apply_index # override MonthOffset method
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "_offset", offset)
_CustomMixin.__init__(self, weekmask, holidays, calendar)
@cache_readonly
def cbday_roll(self):
"""Define default roll function to be called in apply method"""
cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)
if self._prefix.endswith('S'):
# MonthBegin
roll_func = cbday.rollforward
else:
# MonthEnd
roll_func = cbday.rollback
return roll_func
@cache_readonly
def m_offset(self):
if self._prefix.endswith('S'):
# MonthBegin
moff = MonthBegin(n=1, normalize=False)
else:
# MonthEnd
moff = MonthEnd(n=1, normalize=False)
return moff
@cache_readonly
def month_roll(self):
"""Define default roll function to be called in apply method"""
if self._prefix.endswith('S'):
# MonthBegin
roll_func = self.m_offset.rollback
else:
# MonthEnd
roll_func = self.m_offset.rollforward
return roll_func
@apply_wraps
def apply(self, other):
# First move to month offset
cur_month_offset_date = self.month_roll(other)
# Find this custom month offset
compare_date = self.cbday_roll(cur_month_offset_date)
n = liboffsets.roll_convention(other.day, self.n, compare_date.day)
new = cur_month_offset_date + n * self.m_offset
result = self.cbday_roll(new)
return result
class CustomBusinessMonthEnd(_CustomBusinessMonth):
# TODO(py27): Replace condition with Substitution after dropping Py27
if _CustomBusinessMonth.__doc__:
__doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
_prefix = 'CBM'
class CustomBusinessMonthBegin(_CustomBusinessMonth):
# TODO(py27): Replace condition with Substitution after dropping Py27
if _CustomBusinessMonth.__doc__:
__doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]',
'beginning')
_prefix = 'CBMS'
# ---------------------------------------------------------------------
# Semi-Month Based Offset Classes
class SemiMonthOffset(DateOffset):
_adjust_dst = True
_default_day_of_month = 15
_min_day_of_month = 2
_attributes = frozenset(['n', 'normalize', 'day_of_month'])
def __init__(self, n=1, normalize=False, day_of_month=None):
BaseOffset.__init__(self, n, normalize)
if day_of_month is None:
object.__setattr__(self, "day_of_month",
self._default_day_of_month)
else:
object.__setattr__(self, "day_of_month", int(day_of_month))
if not self._min_day_of_month <= self.day_of_month <= 27:
msg = 'day_of_month must be {min}<=day_of_month<=27, got {day}'
raise ValueError(msg.format(min=self._min_day_of_month,
day=self.day_of_month))
@classmethod
def _from_name(cls, suffix=None):
return cls(day_of_month=suffix)
@property
def rule_code(self):
suffix = '-{day_of_month}'.format(day_of_month=self.day_of_month)
return self._prefix + suffix
@apply_wraps
def apply(self, other):
# shift `other` to self.day_of_month, incrementing `n` if necessary
n = liboffsets.roll_convention(other.day, self.n, self.day_of_month)
days_in_month = ccalendar.get_days_in_month(other.year, other.month)
# For SemiMonthBegin on other.day == 1 and
# SemiMonthEnd on other.day == days_in_month,
# shifting `other` to `self.day_of_month` _always_ requires
# incrementing/decrementing `n`, regardless of whether it is
# initially positive.
if type(self) is SemiMonthBegin and (self.n <= 0 and other.day == 1):
n -= 1
elif type(self) is SemiMonthEnd and (self.n > 0 and
other.day == days_in_month):
n += 1
return self._apply(n, other)
def _apply(self, n, other):
"""Handle specific apply logic for child classes"""
raise com.AbstractMethodError(self)
@apply_index_wraps
def apply_index(self, i):
# determine how many days away from the 1st of the month we are
days_from_start = i.to_perioddelta('M').asi8
delta = Timedelta(days=self.day_of_month - 1).value
# get boolean array for each element before the day_of_month
before_day_of_month = days_from_start < delta
# get boolean array for each element after the day_of_month
after_day_of_month = days_from_start > delta
# determine the correct n for each date in i
roll = self._get_roll(i, before_day_of_month, after_day_of_month)
# isolate the time since it will be stripped away on the next line
time = i.to_perioddelta('D')
# apply the correct number of months
i = (i.to_period('M') + (roll // 2)).to_timestamp()
# apply the correct day
i = self._apply_index_days(i, roll)
return i + time
def _get_roll(self, i, before_day_of_month, after_day_of_month):
"""Return an array with the correct n for each date in i.
The roll array is based on the fact that i gets rolled back to
the first day of the month.
"""
raise com.AbstractMethodError(self)
def _apply_index_days(self, i, roll):
"""Apply the correct day for each date in i"""
raise com.AbstractMethodError(self)
class SemiMonthEnd(SemiMonthOffset):
"""
Two DateOffset's per month repeating on the last
day of the month and day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
day_of_month: int, {1, 2,...,27}, default 15
"""
_prefix = 'SM'
_min_day_of_month = 1
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = ccalendar.get_days_in_month(dt.year, dt.month)
return dt.day in (self.day_of_month, days_in_month)
def _apply(self, n, other):
months = n // 2
day = 31 if n % 2 else self.day_of_month
return shift_month(other, months, day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_end = i.is_month_end
if n > 0:
roll_end = np.where(is_month_end, 1, 0)
roll_before = np.where(before_day_of_month, n, n + 1)
roll = roll_end + roll_before
elif n == 0:
roll_after = np.where(after_day_of_month, 2, 0)
roll_before = np.where(~after_day_of_month, 1, 0)
roll = roll_before + roll_after
else:
roll = np.where(after_day_of_month, n + 2, n + 1)
return roll
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month).value
i += nanos.astype('timedelta64[ns]')
return i + Timedelta(days=-1)
class SemiMonthBegin(SemiMonthOffset):
"""
Two DateOffset's per month repeating on the first
day of the month and day_of_month.
.. versionadded:: 0.19.0
Parameters
----------
n: int
normalize : bool, default False
day_of_month: int, {2, 3,...,27}, default 15
"""
_prefix = 'SMS'
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day in (1, self.day_of_month)
def _apply(self, n, other):
months = n // 2 + n % 2
day = 1 if n % 2 else self.day_of_month
return shift_month(other, months, day)
def _get_roll(self, i, before_day_of_month, after_day_of_month):
n = self.n
is_month_start = i.is_month_start
if n > 0:
roll = np.where(before_day_of_month, n, n + 1)
elif n == 0:
roll_start = np.where(is_month_start, 0, 1)
roll_after = np.where(after_day_of_month, 1, 0)
roll = roll_start + roll_after
else:
roll_after = np.where(after_day_of_month, n + 2, n + 1)
roll_start = np.where(is_month_start, -1, 0)
roll = roll_after + roll_start
return roll
def _apply_index_days(self, i, roll):
"""Add days portion of offset to DatetimeIndex i
Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]
Returns
-------
result : DatetimeIndex
"""
nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value
return i + nanos.astype('timedelta64[ns]')
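# A minimal sketch of the semi-month anchors implemented above (the 15th plus
# either the month end or the month start). The helper name is illustrative.
def _example_semimonth_offsets():
    new_year = Timestamp('2018-01-01')
    # SemiMonthEnd anchors on the 15th and the month end; from Jan 1 the
    # nearest forward anchor is the 15th.
    assert new_year + SemiMonthEnd() == Timestamp('2018-01-15')
    # SemiMonthBegin anchors on the 1st and the 15th; Jan 1 is already an
    # anchor, so n=1 moves to the next one.
    assert new_year + SemiMonthBegin() == Timestamp('2018-01-15')
    return new_year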
# ---------------------------------------------------------------------
# Week-Based Offset Classes
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
_inc = timedelta(weeks=1)
_prefix = 'W'
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=None):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
if self.weekday is None:
return other + self.n * self._inc
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
if k > 0:
k -= 1
return other + timedelta(weeks=k)
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return ((i.to_period('W') + self.n).to_timestamp() +
i.to_perioddelta('W'))
else:
return self._end_apply_index(i)
def _end_apply_index(self, dtindex):
"""Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.
Parameters
----------
dtindex : DatetimeIndex
Returns
-------
result : DatetimeIndex
"""
off = dtindex.to_perioddelta('D')
base, mult = libfrequencies.get_freq_code(self.freqstr)
base_period = dtindex.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
roll = np.where(base_period.to_timestamp(how='end') == normed,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
elif self.weekday is None:
return True
return dt.weekday() == self.weekday
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
weekday = ccalendar.int_to_weekday[self.weekday]
suffix = '-{weekday}'.format(weekday=weekday)
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = ccalendar.weekday_to_int[suffix]
return cls(weekday=weekday)
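# A hedged sketch of the Week offset behaviour implemented above; the helper
# name is illustrative only.
def _example_week_offsets():
    monday = Timestamp('2018-01-01')
    # An unanchored Week simply adds seven days.
    assert monday + Week() == Timestamp('2018-01-08')
    # An anchored Week rolls forward to the requested weekday (4 == Friday).
    assert monday + Week(weekday=4) == Timestamp('2018-01-05')
    return monday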
class _WeekOfMonthMixin(object):
"""Mixin for methods common to WeekOfMonth and LastWeekOfMonth"""
@apply_wraps
def apply(self, other):
compare_day = self._get_offset_day(other)
months = self.n
if months > 0 and compare_day > other.day:
months -= 1
elif months <= 0 and compare_day < other.day:
months += 1
shifted = shift_month(other, months, 'start')
to_day = self._get_offset_day(shifted)
return liboffsets.shift_day(shifted, to_day - shifted.day)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == self._get_offset_day(dt)
class WeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}, default 0
0 is the 1st week of the month, 1 the 2nd week, etc.
weekday : {0, 1, ..., 6}, default 0
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_prefix = 'WOM'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'week', 'weekday'])
def __init__(self, n=1, normalize=False, week=0, weekday=0):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "week", week)
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=week<=3, got {week}'
.format(week=self.week))
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other: datetime
Returns
-------
day: int
"""
mstart = datetime(other.year, other.month, 1)
wday = mstart.weekday()
shift_days = (self.weekday - wday) % 7
return 1 + shift_days + self.week * 7
@property
def rule_code(self):
weekday = ccalendar.int_to_weekday.get(self.weekday, '')
return '{prefix}-{week}{weekday}'.format(prefix=self._prefix,
week=self.week + 1,
weekday=weekday)
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix {prefix!r} requires a suffix."
.format(prefix=cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = ccalendar.weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of
each month"
Parameters
----------
n : int, default 1
weekday : {0, 1, ..., 6}, default 0
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_prefix = 'LWOM'
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'weekday'])
def __init__(self, n=1, normalize=False, weekday=0):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "weekday", weekday)
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got {day}'
.format(day=self.weekday))
def _get_offset_day(self, other):
"""
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.
Parameters
----------
other: datetime
Returns
-------
day: int
"""
dim = ccalendar.get_days_in_month(other.year, other.month)
mend = datetime(other.year, other.month, dim)
wday = mend.weekday()
shift_days = (wday - self.weekday) % 7
return dim - shift_days
@property
def rule_code(self):
weekday = ccalendar.int_to_weekday.get(self.weekday, '')
return '{prefix}-{weekday}'.format(prefix=self._prefix,
weekday=weekday)
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix {prefix!r} requires a suffix."
.format(prefix=cls._prefix))
# TODO: handle n here...
weekday = ccalendar.weekday_to_int[suffix]
return cls(weekday=weekday)
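# A minimal sketch of the week-of-month anchors computed by _get_offset_day
# above; the helper name is illustrative only.
def _example_week_of_month_offsets():
    new_year = Timestamp('2018-01-01')
    # week=1, weekday=2 describes the second Wednesday of each month.
    assert new_year + WeekOfMonth(week=1, weekday=2) == Timestamp('2018-01-10')
    # LastWeekOfMonth(weekday=0) describes the last Monday of each month.
    assert new_year + LastWeekOfMonth(weekday=0) == Timestamp('2018-01-29')
    return new_year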
# ---------------------------------------------------------------------
# Quarter-Based Offset Classes
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
_default_startingMonth = None
_from_name_startingMonth = None
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'startingMonth'])
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point. Also apply_index, onOffset, rule_code if
# startingMonth vs month attr names are resolved
def __init__(self, n=1, normalize=False, startingMonth=None):
BaseOffset.__init__(self, n, normalize)
if startingMonth is None:
startingMonth = self._default_startingMonth
object.__setattr__(self, "startingMonth", startingMonth)
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
month = ccalendar.MONTH_ALIASES[self.startingMonth]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
@apply_wraps
def apply(self, other):
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an onOffset date for
# self. `months_since` is the number of months to shift other.month
# to get to this on-offset month.
months_since = other.month % 3 - self.startingMonth % 3
qtrs = liboffsets.roll_qtrday(other, self.n, self.startingMonth,
day_opt=self._day_opt, modby=3)
months = qtrs * 3 - months_since
return shift_month(other, months, self._day_opt)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
mod_month = (dt.month - self.startingMonth) % 3
return mod_month == 0 and dt.day == self._get_offset_day(dt)
@apply_index_wraps
def apply_index(self, dtindex):
shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
self.startingMonth, self._day_opt)
return dtindex._shallow_copy(shifted)
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
_from_name_startingMonth = 12
_prefix = 'BQ'
_day_opt = 'business_end'
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
_day_opt = 'business_start'
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
_day_opt = 'end'
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
_day_opt = 'start'
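# A hedged sketch of the quarter-end anchoring implemented above (default
# startingMonth=3, i.e. quarters ending in Mar/Jun/Sep/Dec). The helper name
# is illustrative only.
def _example_quarter_offsets():
    mid_january = Timestamp('2018-01-15')
    assert mid_january + QuarterEnd() == Timestamp('2018-03-31')
    # A different startingMonth shifts the anchor months.
    assert mid_january + QuarterEnd(startingMonth=1) == Timestamp('2018-01-31')
    return mid_january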
# ---------------------------------------------------------------------
# Year-Based Offset Classes
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
_attributes = frozenset(['n', 'normalize', 'month'])
def _get_offset_day(self, other):
# override BaseOffset method to use self.month instead of other.month
# TODO: there may be a more performant way to do this
return liboffsets.get_day_of_month(other.replace(month=self.month),
self._day_opt)
@apply_wraps
def apply(self, other):
years = roll_yearday(other, self.n, self.month, self._day_opt)
months = years * 12 + (self.month - other.month)
return shift_month(other, months, self._day_opt)
@apply_index_wraps
def apply_index(self, dtindex):
shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
self.month, self._day_opt,
modby=12)
return dtindex._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == self._get_offset_day(dt)
def __init__(self, n=1, normalize=False, month=None):
BaseOffset.__init__(self, n, normalize)
month = month if month is not None else self._default_month
object.__setattr__(self, "month", month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = ccalendar.MONTH_TO_CAL_NUM[suffix]
return cls(**kwargs)
@property
def rule_code(self):
month = ccalendar.MONTH_ALIASES[self.month]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
_day_opt = 'business_end'
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
_day_opt = 'business_start'
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
_day_opt = 'end'
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
_day_opt = 'start'
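# A minimal sketch of the year-anchored offsets defined above; the helper
# name is illustrative only.
def _example_year_offsets():
    mid_may = Timestamp('2018-05-17')
    assert mid_may + YearEnd() == Timestamp('2018-12-31')
    assert mid_may + YearBegin() == Timestamp('2019-01-01')
    # A non-default month moves the anchor, e.g. fiscal years ending in June.
    assert mid_may + YearEnd(month=6) == Timestamp('2018-06-30')
    return mid_may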
# ---------------------------------------------------------------------
# Special Offset Classes
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking.
For more information see:
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_adjust_dst = True
_attributes = frozenset(['weekday', 'startingMonth', 'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
variation="nearest"):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "startingMonth", startingMonth)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('{variation} is not a valid variation'
.format(variation=self.variation))
def isAnchored(self):
return (self.n == 1 and
self.startingMonth is not None and
self.weekday is not None)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return (year_end == dt or
self.get_year_end(shift_month(dt, -1, None)) == dt)
else:
return year_end == dt
@apply_wraps
def apply(self, other):
norm = Timestamp(other).normalize()
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = conversion.localize_pydatetime(prev_year, other.tzinfo)
cur_year = conversion.localize_pydatetime(cur_year, other.tzinfo)
next_year = conversion.localize_pydatetime(next_year, other.tzinfo)
# Note: next_year.year == other.year + 1, so we will always
# have other < next_year
if norm == prev_year:
n -= 1
elif norm == cur_year:
pass
elif n > 0:
if norm < prev_year:
n -= 2
elif prev_year < norm < cur_year:
n -= 1
elif cur_year < norm < next_year:
pass
else:
if cur_year < norm < next_year:
n += 1
elif prev_year < norm < cur_year:
pass
elif (norm.year == prev_year.year and norm < prev_year and
prev_year - norm <= timedelta(6)):
# GH#14774, error when next_year.year == cur_year.year
# e.g. prev_year == datetime(2004, 1, 3),
# other == datetime(2004, 1, 1)
n -= 1
else:
assert False
shifted = datetime(other.year + n, self.startingMonth, 1)
result = self.get_year_end(shifted)
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second,
other.microsecond)
return result
def get_year_end(self, dt):
assert dt.tzinfo is None
dim = ccalendar.get_days_in_month(dt.year, self.startingMonth)
target_date = datetime(dt.year, self.startingMonth, dim)
wkday_diff = self.weekday - target_date.weekday()
if wkday_diff == 0:
# year_end is the same for "last" and "nearest" cases
return target_date
if self.variation == "last":
days_forward = (wkday_diff % 7) - 7
# days_forward is always negative, so we always end up
# in the same year as dt
return target_date + timedelta(days=days_forward)
else:
# variation == "nearest":
days_forward = wkday_diff % 7
if days_forward <= 3:
# The upcoming self.weekday is closer than the previous one
return target_date + timedelta(days_forward)
else:
# The previous self.weekday is closer than the upcoming one
return target_date + timedelta(days_forward - 7)
@property
def rule_code(self):
prefix = self._prefix
suffix = self.get_rule_code_suffix()
return "{prefix}-{suffix}".format(prefix=prefix, suffix=suffix)
def _get_suffix_prefix(self):
if self.variation == "nearest":
return 'N'
else:
return 'L'
def get_rule_code_suffix(self):
prefix = self._get_suffix_prefix()
month = ccalendar.MONTH_ALIASES[self.startingMonth]
weekday = ccalendar.int_to_weekday[self.weekday]
return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month,
weekday=weekday)
@classmethod
def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
if varion_code == "N":
variation = "nearest"
elif varion_code == "L":
variation = "last"
else:
raise ValueError("Unable to parse varion_code: "
"{code}".format(code=varion_code))
startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code]
weekday = ccalendar.weekday_to_int[weekday_code]
return {"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
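# A hedged sketch of the 52-53 week fiscal-year anchoring implemented above.
# The parameters below are illustrative: a fiscal year that ends on the last
# Saturday of December (the "last" variation).
def _example_fy5253_offset():
    fiscal_year_end = FY5253(weekday=5, startingMonth=12, variation='last')
    mid_year = Timestamp('2018-06-01')
    # The last Saturday of December 2018 is the 29th.
    assert mid_year + fiscal_year_end == Timestamp('2018-12-29')
    return fiscal_year_end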
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and parking.
For more information see:
http://en.wikipedia.org/wiki/4-4-5_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
qtr_with_extra_week : The quarter number that has the leap
or 14-week period when needed. {1,2,3,4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
_attributes = frozenset(['weekday', 'startingMonth', 'qtr_with_extra_week',
'variation'])
def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1,
qtr_with_extra_week=1, variation="nearest"):
BaseOffset.__init__(self, n, normalize)
object.__setattr__(self, "startingMonth", startingMonth)
object.__setattr__(self, "weekday", weekday)
object.__setattr__(self, "qtr_with_extra_week", qtr_with_extra_week)
object.__setattr__(self, "variation", variation)
if self.n == 0:
raise ValueError('N cannot be 0')
@cache_readonly
def _offset(self):
return FY5253(startingMonth=self.startingMonth,
weekday=self.weekday,
variation=self.variation)
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
def _rollback_to_year(self, other):
"""roll `other` back to the most recent date that was on a fiscal year
end. Return the date of that year-end, the number of full quarters
elapsed between that year-end and other, and the remaining Timedelta
since the most recent quarter-end.
Parameters
----------
other : datetime or Timestamp
Returns
-------
tuple of
prev_year_end : Timestamp giving most recent fiscal year end
num_qtrs : int
tdelta : Timedelta
"""
num_qtrs = 0
norm = Timestamp(other).tz_localize(None)
start = self._offset.rollback(norm)
# Note: start <= norm and self._offset.onOffset(start)
if start < norm:
# roll adjustment
qtr_lens = self.get_weeks(norm)
# check that qtr_lens is consistent with self._offset addition
end = liboffsets.shift_day(start, days=7 * sum(qtr_lens))
assert self._offset.onOffset(end), (start, end, qtr_lens)
tdelta = norm - start
for qlen in qtr_lens:
if qlen * 7 <= tdelta.days:
num_qtrs += 1
tdelta -= Timedelta(days=qlen * 7)
else:
break
else:
tdelta = Timedelta(0)
# Note: we always have tdelta.value >= 0
return start, num_qtrs, tdelta
@apply_wraps
def apply(self, other):
# Note: self.n == 0 is not allowed.
n = self.n
prev_year_end, num_qtrs, tdelta = self._rollback_to_year(other)
res = prev_year_end
n += num_qtrs
if self.n <= 0 and tdelta.value > 0:
n += 1
# Possible speedup by handling years first.
years = n // 4
if years:
res += self._offset * years
n -= years * 4
# Add an extra day to make *sure* we are getting the quarter lengths
# for the upcoming year, not the previous year
qtr_lens = self.get_weeks(res + Timedelta(days=1))
# Note: we always have 0 <= n < 4
weeks = sum(qtr_lens[:n])
if weeks:
res = liboffsets.shift_day(res, days=weeks * 7)
return res
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
# Avoid round-down errors --> normalize to get
# e.g. '370D' instead of '360D23H'
norm = Timestamp(dt).normalize().tz_localize(None)
next_year_end = self._offset.rollforward(norm)
prev_year_end = norm - self._offset
weeks_in_year = (next_year_end - prev_year_end).days / 7
assert weeks_in_year in [52, 53], weeks_in_year
return weeks_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
next_year_end = dt - self._offset
qtr_lens = self.get_weeks(dt)
current = next_year_end
for qtr_len in qtr_lens:
current = liboffsets.shift_day(current, days=qtr_len * 7)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
qtr = self.qtr_with_extra_week
return "{prefix}-{suffix}-{qtr}".format(prefix=self._prefix,
suffix=suffix, qtr=qtr)
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
"""
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
"""
_adjust_dst = True
_attributes = frozenset(['n', 'normalize'])
__init__ = BaseOffset.__init__
@apply_wraps
def apply(self, other):
current_easter = easter(other.year)
current_easter = datetime(current_easter.year,
current_easter.month, current_easter.day)
current_easter = conversion.localize_pydatetime(current_easter,
other.tzinfo)
n = self.n
if n >= 0 and other < current_easter:
n -= 1
elif n < 0 and other > current_easter:
n += 1
# TODO: Why does this handle the 0 case the opposite of others?
# NOTE: easter returns a datetime.date so we have to convert to type of
# other
new = easter(other.year + n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
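# A minimal sketch of the Easter offset defined above, which defers the date
# calculation to dateutil.easter; the helper name is illustrative only.
def _example_easter_offset():
    new_year = Timestamp('2018-01-01')
    # Easter Sunday 2018 fell on April 1st.
    assert new_year + Easter() == Timestamp('2018-04-01')
    return new_year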
class CalendarDay(SingleConstructorOffset):
"""
Calendar day offset. Respects calendar arithmetic as opposed to Day which
respects absolute time.
"""
_adjust_dst = True
_inc = Timedelta(days=1)
_prefix = 'CD'
_attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
BaseOffset.__init__(self, n, normalize)
@apply_wraps
def apply(self, other):
"""
Apply scalar arithmetic with CalendarDay offset. Incoming datetime
objects can be tz-aware or naive.
"""
if type(other) == type(self):
# Add other CalendarDays
return type(self)(self.n + other.n, normalize=self.normalize)
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None:
other = other.replace(tzinfo=None)
other = other + self.n * self._inc
if tzinfo is not None:
# This can raise an AmbiguousTimeError or NonExistentTimeError
other = conversion.localize_pydatetime(other, tzinfo)
try:
return as_timestamp(other)
except TypeError:
raise TypeError("Cannot perform arithmetic between {other} and "
"CalendarDay".format(other=type(other)))
@apply_index_wraps
def apply_index(self, i):
"""
Apply the CalendarDay offset to a DatetimeIndex. Incoming DatetimeIndex
objects are assumed to be tz_naive
"""
return i + self.n * self._inc
# ---------------------------------------------------------------------
# Ticks
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(liboffsets._Tick, SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
_prefix = 'undefined'
_attributes = frozenset(['n', 'normalize'])
def __init__(self, n=1, normalize=False):
BaseOffset.__init__(self, n, normalize)
if normalize:
raise ValueError("Tick offset with `normalize=True` are not "
"allowed.") # GH#21427
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
elif isinstance(other, ABCPeriod):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
except OverflowError:
raise OverflowError("the add operation between {self} and {other} "
"will overflow".format(self=self, other=other))
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return False
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params)
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return True
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return delta_to_nanoseconds(self.delta)
# TODO: Should Tick have its own apply_index?
def apply(self, other):
        # Timestamp can handle tz and nanoseconds, so no need to use apply_wraps
if isinstance(other, Timestamp):
# GH 15126
# in order to avoid a recursive
# call of __add__ and __radd__ if there is
# an exception, when we call using the + operator,
# we directly call the known method
result = other.__add__(self)
            if result is NotImplemented:
raise OverflowError
return result
elif isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
raise ApplyTypeError('Unhandled type: {type_str}'
.format(type_str=type(other).__name__))
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
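# A minimal sketch of Tick arithmetic (illustrative only): adding two different
# Tick subclasses goes through _delta_to_tick, which picks the coarsest unit
# that divides the combined delta exactly.
# >>> Hour(2) + Minute(30)      # 2h30m is not a whole number of hours
# <150 * Minutes>
# >>> Hour(1) + Hour(2)         # same type, so the n values are simply summed
# <3 * Hours>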
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
# ---------------------------------------------------------------------
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule, except it uses pandas DateOffset
    objects to represent time increments.
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, (default None)
offset : DateOffset, (default BDay())
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start and offset.n >= 0:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset {offset} did not increment date'
.format(offset=offset))
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset {offset} did not decrement date'
.format(offset=offset))
cur = next_date
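# A minimal usage sketch (illustrative only; generate_range is an internal
# helper, and 2000-01-03 is a Monday):
# >>> list(generate_range(start=datetime(2000, 1, 3), periods=3, offset=BDay()))
# [Timestamp('2000-01-03 00:00:00'), Timestamp('2000-01-04 00:00:00'),
#  Timestamp('2000-01-05 00:00:00')]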
prefix_mapping = {offset._prefix: offset for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
CustomBusinessHour, # 'CBH'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Nano, # 'N'
SemiMonthEnd, # 'SM'
SemiMonthBegin, # 'SMS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
CalendarDay # 'CD'
]}
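# A minimal lookup sketch (illustrative only): the registry maps each offset's
# string prefix to its class so frequency aliases can be resolved, e.g.
# >>> prefix_mapping['H'] is Hour, prefix_mapping['CD'] is CalendarDay
# (True, True)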
|
bsd-3-clause
|
ARM-software/lisa
|
external/devlib/setup.py
|
3
|
4520
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import imp
import os
import sys
import warnings
from itertools import chain
try:
from setuptools import setup
from setuptools.command.sdist import sdist as orig_sdist
except ImportError:
from distutils.core import setup
from distutils.command.sdist import sdist as orig_sdist
devlib_dir = os.path.join(os.path.dirname(__file__), 'devlib')
sys.path.insert(0, os.path.join(devlib_dir, 'core'))
# happens if falling back to distutils
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")
warnings.filterwarnings('ignore', "Unknown distribution option: 'extras_require'")
try:
os.remove('MANIFEST')
except OSError:
pass
vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
# can load this, as it does not have any devlib imports
version_helper = imp.load_source('version_helper', vh_path)
__version__ = version_helper.get_devlib_version()
commit = version_helper.get_commit()
if commit:
__version__ = '{}+{}'.format(__version__, commit)
packages = []
data_files = {}
source_dir = os.path.dirname(__file__)
for root, dirs, files in os.walk(devlib_dir):
rel_dir = os.path.relpath(root, source_dir)
data = []
if '__init__.py' in files:
for f in files:
if os.path.splitext(f)[1] not in ['.py', '.pyc', '.pyo']:
data.append(f)
package_name = rel_dir.replace(os.sep, '.')
package_dir = root
packages.append(package_name)
data_files[package_name] = data
else:
# use previous package name
filepaths = [os.path.join(root, f) for f in files]
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
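# For illustration only (hypothetical output, assuming the usual devlib layout):
# after the walk, `packages` holds dotted names such as 'devlib' and
# 'devlib.utils', and `data_files` maps each of them to the non-Python files
# found under that package (including sub-directories without an __init__.py).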
with open("README.rst", "r") as fh:
long_description = fh.read()
params = dict(
name='devlib',
    description='A library for interacting with and instrumenting remote devices.',
long_description=long_description,
version=__version__,
packages=packages,
package_data=data_files,
url='https://github.com/ARM-software/devlib',
license='Apache v2',
maintainer='ARM Ltd.',
install_requires=[
'python-dateutil', # converting between UTC and local time.
        'pexpect>=3.3', # Send/receive to/from device
'pyserial', # Serial port interface
'paramiko', # SSH connection
'scp', # SSH connection file transfers
'wrapt', # Basic for construction of decorator functions
'future', # Python 2-3 compatibility
'enum34;python_version<"3.4"', # Enums for Python < 3.4
'contextlib2;python_version<"3.0"', # Python 3 contextlib backport for Python 2
'numpy<=1.16.4; python_version<"3"',
'numpy; python_version>="3"',
'pandas<=0.24.2; python_version<"3"',
'pandas; python_version>"3"',
'lxml', # More robust xml parsing
],
extras_require={
'daq': ['daqpower>=2'],
'doc': ['sphinx'],
'monsoon': ['python-gflags'],
'acme': ['pandas', 'numpy'],
},
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
],
)
all_extras = list(chain.from_iterable(params['extras_require'].values()))
params['extras_require']['full'] = all_extras
class sdist(orig_sdist):
user_options = orig_sdist.user_options + [
('strip-commit', 's',
"Strip git commit hash from package version ")
]
def initialize_options(self):
orig_sdist.initialize_options(self)
self.strip_commit = False
def run(self):
if self.strip_commit:
self.distribution.get_version = lambda : __version__.split('+')[0]
orig_sdist.run(self)
params['cmdclass'] = {'sdist': sdist}
setup(**params)
|
apache-2.0
|
ltiao/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
|
bsd-3-clause
|
NicovincX2/Python-3.5
|
Divers/bachelors_degrees_by_gender_USA.py
|
1
|
4545
|
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
from matplotlib.cbook import get_sample_data
fname = get_sample_data('percent_bachelors_degrees_women_usa.csv')
gender_degree_data = csv2rec(fname)
# These are the colors that will be used in the plot
color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
'#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
'#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
'#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
# You typically want your plot to be ~1.33x wider than tall. This plot
# is a rare exception because of the number of lines being plotted on it.
# Common sizes: (10, 7.5) and (12, 9)
fig, ax = plt.subplots(1, 1, figsize=(12, 14))
# Remove the plot frame lines. They are unnecessary here.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# Ensure that the axis ticks only show up on the bottom and left of the plot.
# Ticks on the right and top of the plot are generally unnecessary.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
# Avoid unnecessary whitespace.
plt.xlim(1968.5, 2011.1)
plt.ylim(-0.25, 90)
# Make sure your axis ticks are large enough to be easily read.
# You don't want your viewers squinting to read your plot.
plt.xticks(range(1970, 2011, 10), fontsize=14)
plt.yticks(range(0, 91, 10), ['{0}%'.format(x)
for x in range(0, 91, 10)], fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
# the axis ticks. Make sure that the lines are light and small so they
# don't obscure the primary data lines.
for y in range(10, 91, 10):
plt.plot(range(1969, 2012), [y] * len(range(1969, 2012)), '--',
lw=0.5, color='black', alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just
# plotted.
plt.tick_params(axis='both', which='both', bottom='off', top='off',
labelbottom='on', left='off', right='off', labelleft='on')
# Now that the plot is prepared, it's time to actually plot the data!
# Note that I plotted the majors in order of the highest % in the final year.
majors = ['Health Professions', 'Public Administration', 'Education',
'Psychology', 'Foreign Languages', 'English',
'Communications\nand Journalism', 'Art and Performance', 'Biology',
'Agriculture', 'Social Sciences and History', 'Business',
'Math and Statistics', 'Architecture', 'Physical Sciences',
'Computer Science', 'Engineering']
y_offsets = {'Foreign Languages': 0.5, 'English': -0.5,
'Communications\nand Journalism': 0.75,
'Art and Performance': -0.25, 'Agriculture': 1.25,
'Social Sciences and History': 0.25, 'Business': -0.75,
'Math and Statistics': 0.75, 'Architecture': -0.75,
'Computer Science': 0.75, 'Engineering': -0.25}
for rank, column in enumerate(majors):
# Plot each line separately with its own color.
column_rec_name = column.replace('\n', '_').replace(' ', '_').lower()
line = plt.plot(gender_degree_data.year,
gender_degree_data[column_rec_name],
lw=2.5,
color=color_sequence[rank])
    # Add a text label to the right end of every line. Most of the code below
    # adds specific y-position offsets because some labels overlapped.
y_pos = gender_degree_data[column_rec_name][-1] - 0.5
if column in y_offsets:
y_pos += y_offsets[column]
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(2011.5, y_pos, column, fontsize=14, color=color_sequence[rank])
# Make the title big enough so it spans the entire plot, but don't make it
# so big that it requires two lines to show.
# Note that if the title is descriptive enough, it is unnecessary to include
# axis labels; they are self-evident, in this plot's case.
plt.title('Percentage of Bachelor\'s degrees conferred to women in '
'the U.S.A. by major (1970-2011)\n', fontsize=18, ha='center')
# Finally, save the figure as a PNG.
# You can also save it as a PDF, JPEG, etc.
# Just change the file extension in this call.
plt.savefig('percent-bachelors-degrees-women-usa.png', bbox_inches='tight')
os.system("pause")
|
gpl-3.0
|
deeuu/loudness
|
python/tests/modelValidations/validate_StationaryLoudnessANSIS342007.py
|
1
|
6403
|
import numpy as np
from usefulFunctions import *
from loudness.tools.extractors import StationaryLoudnessExtractor
from loudness.tools.predictors import StationaryLoudnessContourPredictor
from loudness.tools import spectra
from loudness import StationaryLoudnessANSIS342007, soneToPhonMGB1997
import matplotlib.pyplot as plt
if __name__ == '__main__':
model = StationaryLoudnessANSIS342007()
feature = 'Loudness'
extractor = StationaryLoudnessExtractor(model, feature)
'''
Pure tones
'''
#Table 7
levels = np.array([0, 1, 2, 3, 4, 5, 7.5])
levels = np.append(levels, np.arange(10, 125, 5))
expected = np.array([0.0011, 0.0018, 0.0028, 0.0044, 0.0065, 0.0088, 0.017,
0.029, 0.070, 0.142, 0.255, 0.422, 0.662, 0.997, 1.461, 2.098, 2.970,
4.166, 5.813, 8.102, 11.326, 15.980, 22.929, 33.216, 48.242, 70.362,
103.274, 152.776, 227.855, 341.982])
measured = np.zeros(expected.size)
for i, level in enumerate(levels):
extractor.process(np.array([1000.0]), np.array([level]))
measured[i] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(levels, expected, measured,
'./data/StationaryLoudnessANSIS342007_PureTonesEX1.csv')
#Example 2
levels = np.array([20, 40, 60, 80]);
expected = np.array([0.35, 1.8, 7.1, 27.5])
measured = np.zeros(expected.size)
for i, level in enumerate(levels):
extractor.process(np.array([3000.0]), np.array([level]))
measured[i] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(levels, expected, measured,
'./data/StationaryLoudnessANSIS342007_PureTonesEX2.csv')
#Example 4
levels = np.array([50])
expected = np.array([0.345])
measured = np.zeros(expected.size)
for i, level in enumerate(levels):
extractor.process(np.array([100.0]), np.array([level]))
measured[i] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(levels, expected, measured,
'./data/StationaryLoudnessANSIS342007_PureTonesEX4.csv')
'''
Filtered noise
'''
#Example 1:
expected = np.array([4.25, 14.29])
measured = np.zeros(2)
freqs, spectrum = generateWhiteNoiseBandFromFc(1000, 100, 40, False)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[0] = extractor.outputDict[feature]
freqs, spectrum = generateWhiteNoiseBandFromFc(1000, 1000, 40, False)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[1] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([100, 1000]), expected, measured,
'./data/StationaryLoudnessANSIS342007_FilteredNoiseEX1.csv')
#Example 2:
expected = np.array([4.25, 8.02])
measured = np.zeros(2)
freqs, spectrum = generateWhiteNoiseBandFromFc(1000, 100, 60, True)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[0] = extractor.outputDict[feature]
freqs, spectrum = generateWhiteNoiseBandFromFc(1000, 1000, 60, True)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[1] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([100, 1000]), expected, measured,
'./data/StationaryLoudnessANSIS342007_FilteredNoiseEX2.csv')
#Example 3:
levels = np.array([0, 20, 40])
expected = np.array([3.62, 16.00, 49.28])
measured = np.zeros(3)
for i, level in enumerate(levels):
freqs, spectrum = generatePinkNoise(50, 15000, level, 1000)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[i] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(levels, expected, measured,
'./data/StationaryLoudnessANSIS342007_FilteredNoiseEX3.csv')
#Example4:
levels = np.arange(0, 60, 10)
expected = np.array([0.071, 0.67, 2.51, 6.26, 12.7, 23.3])
measured = np.zeros(levels.size)
for i, level in enumerate(levels):
bandLevels = np.ones(26) * level
freqs, spectrum = generateSpectrumFromThirdOctaveBandLevels(bandLevels)
extractor.process(freqs, 10 * np.log10(spectrum))
measured[i] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(levels, expected, measured,
'./data/StationaryLoudnessANSIS342007_FilteredNoiseEX4.csv')
'''
Multiple tones
'''
#Example 1
level = 60
expected = np.array([6.35])
measured = np.zeros(expected.size)
extractor.process(np.array([1500, 1600, 1700]), np.array([level, level, level]))
measured[0] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([level]), expected, measured,
'./data/StationaryLoudnessANSIS342007_MultipleTonesEX1.csv')
#Example 2
level = 60
expected = np.array([12.62])
measured = np.zeros(expected.size)
extractor.process(np.array([1000, 1600, 2400]), np.array([level, level, level]))
measured[0] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([level]), expected, measured,
'./data/StationaryLoudnessANSIS342007_MultipleTonesEX2.csv')
#Example 3
level = 30
expected = np.array([1.99])
measured = np.zeros(expected.size)
extractor.process(np.arange(100, 1100, 100), np.ones(10) * level)
measured[0] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([level]), expected, measured,
'./data/StationaryLoudnessANSIS342007_MultipleTonesEX3.csv')
'''
Tones plus noise
'''
#Example 1
expected = np.array([5.14])
measured = np.zeros(expected.size)
freqs, spectrum = generateWhiteNoiseBandFromFc(1000, 100, 40, False)
freqs = np.sort(np.append(freqs, 1000))
idx = np.where(freqs == 1000)[0]
spectrum = np.insert(spectrum, idx, 10 ** (60 / 10.0))
extractor.process(freqs, 10 * np.log10(spectrum))
measured[0] = extractor.outputDict[feature]
writeTo3ColumnCSVFile(np.array([40]), expected, measured,
'./data/StationaryLoudnessANSIS342007_TonePlusNoiseEX1.csv')
'''
ISO Absolute thresholds
'''
def func(x):
return soneToPhonMGB1997(float(x), True)
predictor = StationaryLoudnessContourPredictor(model, feature, func, 'abs')
predictor.tol = 0.01
predictor.setTargetLoudnessLevel = 2.2
predictor.process()
writeTo3ColumnCSVFile(predictor.freqs, predictor.sPLs, predictor.predictions,
'./data/StationaryLoudnessANSIS342007_ISO389-7AbsThresholds.csv')
|
gpl-3.0
|
yidawang/brainiak
|
examples/hyperparamopt/hpo_example.py
|
7
|
4770
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example for using hyperparameter optimization (hpo) package.
In this example, we will try to optimize a function of
2 variables (branin) using both hpo and grid search.
"""
import brainiak.hyperparamopt.hpo as hpo
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
# Branin is the function we want to minimize.
# It is a function of 2 variables.
# In the range x1 in [-5, 10] and x2 in [0, 15],
# this function has 2 local minima and 1 global minimum.
# Global minimum of -16.6 at (-3.7, 13.7).
# This is the modified version (Branin-Hoo) of the standard branin function.
# If you want the standard version (which has 3 global minima),
# you can omit the "+5*x1" term at the end
# For more details, see http://www.sfu.ca/~ssurjano/branin.html
def branin(x1, x2):
a = 1.0
b = 5.1/(4*np.pi*np.pi)
c = 5.0/np.pi
r = 6.0
s = 10.0
t = 1.0/(8*np.pi)
return a*((x2 - b*x1*x1 + c*x1 - r)**2) + s*(1-t)*np.cos(x1) + s + 5*x1
# This is a wrapper around branin that takes in a dictionary
def branin_wrapper(args):
x1 = args['x1']
x2 = args['x2']
return branin(x1,x2)
# Define ranges for the two variables
x1lo = -5
x1hi = 10
x2lo = 0
x2hi = 15
##############################
# Optimization through hpo
##############################
# Define a space for hpo to use
# The space needs to define
# 1. Name of the variables
# 2. Default samplers for the variables (use scipy.stats objects)
# 3. lo and hi ranges for the variables (will use -inf, inf if not specified)
space = {'x1':{'dist': st.uniform(x1lo, x1hi-x1lo), 'lo':x1lo, 'hi':x1hi},
'x2':{'dist': st.uniform(x2lo, x2hi-x2lo), 'lo':x2lo, 'hi':x2hi}}
# The trials object is just a list that stores the samples generated and the
# corresponding function values at those sample points.
trials = []
# Maximum number of samples that will be generated.
# This is the maximum number of function evaluations that will be performed.
n_hpo_samples = 100
# Call the fmin function that does the optimization.
# The function to be optimized should take in a dictionary. You will probably
# need to wrap your function to do this (see branin() and branin_wrapper()).
# You can pass in a non-empty trials object as well e.g. from a previous
# fmin run. We just append to the trials object and will use existing data
# in our optimization.
print("Starting optimization through hpo")
best = hpo.fmin(loss_fn=branin_wrapper, space=space,
max_evals=n_hpo_samples, trials=trials)
# Print out the best value obtained through HPO
print("Best obtained through HPO (", n_hpo_samples, " samples) = ",
best['x1'], best['x2'], "; min value = ", best['loss'])
#####################################
# Optimization through grid search
#####################################
# Divide the space into a uniform grid (meshgrid)
n = 200
x1 = np.linspace(x1lo, x1hi, n)
x2 = np.linspace(x2lo, x2hi, n)
x1_grid, x2_grid = np.meshgrid(x1, x2)
# Calculate the function values along the grid
print("Starting optimization through grid search")
z = branin(x1_grid, x2_grid)
# Print out the best value obtained through grid search
print("Best obtained through grid search (", n*n, " samples) = ",
x1_grid.flatten()[z.argmin()], x2_grid.flatten()[z.argmin()],
"; min value = ", z.min())
########
# Plots
########
# Convert trials object data into numpy arrays
x1 = np.array([tr['x1'] for tr in trials])
x2 = np.array([tr['x2'] for tr in trials])
y = np.array([tr['loss'] for tr in trials])
# Plot the function contour using the grid search data
h = (z.max()-z.min())/25
plt.contour(x1_grid, x2_grid, z, levels=np.linspace(z.min()-h, z.max(), 26))
# Mark the points that were sampled through HPO
plt.scatter(x1, x2, s=10, color='r', label='HPO Samples')
# Mark the best points obtained through both methods
plt.scatter(best['x1'], best['x2'], s=30, color='b', label='Best HPO')
plt.scatter(x1_grid.flatten()[z.argmin()], x2_grid.flatten()[z.argmin()],
s=30, color='g', label='Best grid search')
# Labels
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Hyperparameter optimization using HPO (Branin function)')
plt.legend()
plt.show()
|
apache-2.0
|
jhamman/xarray
|
xarray/tests/test_plot.py
|
1
|
76162
|
import inspect
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.plot as xplt
from xarray import DataArray, Dataset
from xarray.plot.dataset_plot import _infer_meta_data
from xarray.plot.plot import _infer_interval_breaks
from xarray.plot.utils import (
_build_discrete_cmap,
_color_palette,
_determine_cmap_params,
import_seaborn,
label_from_attrs,
)
from . import (
assert_array_equal,
assert_equal,
has_nc_time_axis,
raises_regex,
requires_cftime,
requires_matplotlib,
requires_nc_time_axis,
requires_seaborn,
)
# import mpl and change the backend before other mpl imports
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
pass
@pytest.mark.flaky
@pytest.mark.skip(reason="maybe flaky")
def text_in_fig():
"""
Return the set of all text in the figure
"""
return {t.get_text() for t in plt.gcf().findobj(mpl.text.Text)}
def find_possible_colorbars():
# nb. this function also matches meshes from pcolormesh
return plt.gcf().findobj(mpl.collections.QuadMesh)
def substring_in_axes(substring, ax):
"""
Return True if a substring is found anywhere in an axes
"""
alltxt = {t.get_text() for t in ax.findobj(mpl.text.Text)}
for txt in alltxt:
if substring in txt:
return True
return False
def easy_array(shape, start=0, stop=1):
"""
Make an array with desired shape using np.linspace
shape is a tuple like (2, 3)
"""
a = np.linspace(start, stop, num=np.prod(shape))
return a.reshape(shape)
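# A minimal sketch (illustrative only; exact float formatting depends on the
# numpy version):
# >>> easy_array((2, 3))
# array([[0. , 0.2, 0.4],
#        [0.6, 0.8, 1. ]])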
@requires_matplotlib
class PlotTestCase:
@pytest.fixture(autouse=True)
def setup(self):
yield
# Remove all matplotlib figures
plt.close("all")
def pass_in_axis(self, plotmethod):
fig, axes = plt.subplots(ncols=2)
plotmethod(ax=axes[0])
assert axes[0].has_data()
@pytest.mark.slow
def imshow_called(self, plotmethod):
plotmethod()
images = plt.gca().findobj(mpl.image.AxesImage)
return len(images) > 0
def contourf_called(self, plotmethod):
plotmethod()
paths = plt.gca().findobj(mpl.collections.PathCollection)
return len(paths) > 0
class TestPlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setup_array(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_label_from_attrs(self):
da = self.darray.copy()
assert "" == label_from_attrs(da)
da.name = "a"
da.attrs["units"] = "a_units"
da.attrs["long_name"] = "a_long_name"
da.attrs["standard_name"] = "a_standard_name"
assert "a_long_name [a_units]" == label_from_attrs(da)
da.attrs.pop("long_name")
assert "a_standard_name [a_units]" == label_from_attrs(da)
da.attrs.pop("units")
assert "a_standard_name" == label_from_attrs(da)
da.attrs["units"] = "a_units"
da.attrs.pop("standard_name")
assert "a [a_units]" == label_from_attrs(da)
da.attrs.pop("units")
assert "a" == label_from_attrs(da)
def test1d(self):
self.darray[:, 0, 0].plot()
with raises_regex(ValueError, "None"):
self.darray[:, 0, 0].plot(x="dim_1")
def test_1d_x_y_kw(self):
z = np.arange(10)
da = DataArray(np.cos(z), dims=["z"], coords=[z], name="f")
xy = [[None, None], [None, "z"], ["z", None]]
f, ax = plt.subplots(3, 1)
for aa, (x, y) in enumerate(xy):
da.plot(x=x, y=y, ax=ax.flat[aa])
with raises_regex(ValueError, "cannot"):
da.plot(x="z", y="z")
with raises_regex(ValueError, "None"):
da.plot(x="f", y="z")
with raises_regex(ValueError, "None"):
da.plot(x="z", y="f")
# Test for bug in GH issue #2725
def test_infer_line_data(self):
current = DataArray(
name="I",
data=np.array([5, 8]),
dims=["t"],
coords={
"t": (["t"], np.array([0.1, 0.2])),
"V": (["t"], np.array([100, 200])),
},
)
# Plot current against voltage
line = current.plot.line(x="V")[0]
assert_array_equal(line.get_xdata(), current.coords["V"].values)
# Plot current against time
line = current.plot.line()[0]
assert_array_equal(line.get_xdata(), current.coords["t"].values)
def test_line_plot_along_1d_coord(self):
# Test for bug in GH #3334
x_coord = xr.DataArray(data=[0.1, 0.2], dims=["x"])
t_coord = xr.DataArray(data=[10, 20], dims=["t"])
da = xr.DataArray(
data=np.array([[0, 1], [5, 9]]),
dims=["x", "t"],
coords={"x": x_coord, "time": t_coord},
)
line = da.plot(x="time", hue="x")[0]
assert_array_equal(line.get_xdata(), da.coords["time"].values)
line = da.plot(y="time", hue="x")[0]
assert_array_equal(line.get_ydata(), da.coords["time"].values)
def test_2d_line(self):
with raises_regex(ValueError, "hue"):
self.darray[:, :, 0].plot.line()
self.darray[:, :, 0].plot.line(hue="dim_1")
self.darray[:, :, 0].plot.line(x="dim_1")
self.darray[:, :, 0].plot.line(y="dim_1")
self.darray[:, :, 0].plot.line(x="dim_0", hue="dim_1")
self.darray[:, :, 0].plot.line(y="dim_0", hue="dim_1")
with raises_regex(ValueError, "cannot"):
self.darray[:, :, 0].plot.line(x="dim_1", y="dim_0", hue="dim_1")
def test_2d_line_accepts_legend_kw(self):
self.darray[:, :, 0].plot.line(x="dim_0", add_legend=False)
assert not plt.gca().get_legend()
plt.cla()
self.darray[:, :, 0].plot.line(x="dim_0", add_legend=True)
assert plt.gca().get_legend()
# check whether legend title is set
assert plt.gca().get_legend().get_title().get_text() == "dim_1"
def test_2d_line_accepts_x_kw(self):
self.darray[:, :, 0].plot.line(x="dim_0")
assert plt.gca().get_xlabel() == "dim_0"
plt.cla()
self.darray[:, :, 0].plot.line(x="dim_1")
assert plt.gca().get_xlabel() == "dim_1"
def test_2d_line_accepts_hue_kw(self):
self.darray[:, :, 0].plot.line(hue="dim_0")
assert plt.gca().get_legend().get_title().get_text() == "dim_0"
plt.cla()
self.darray[:, :, 0].plot.line(hue="dim_1")
assert plt.gca().get_legend().get_title().get_text() == "dim_1"
def test_2d_coords_line_plot(self):
lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4))
lon += lat / 10
lat += lon / 10
da = xr.DataArray(
np.arange(20).reshape(4, 5),
dims=["y", "x"],
coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)},
)
hdl = da.plot.line(x="lon", hue="x")
assert len(hdl) == 5
plt.clf()
hdl = da.plot.line(x="lon", hue="y")
assert len(hdl) == 4
with pytest.raises(ValueError, match="For 2D inputs, hue must be a dimension"):
da.plot.line(x="lon", hue="lat")
def test_2d_before_squeeze(self):
a = DataArray(easy_array((1, 5)))
a.plot()
def test2d_uniform_calls_imshow(self):
assert self.imshow_called(self.darray[:, :, 0].plot.imshow)
@pytest.mark.slow
def test2d_nonuniform_calls_contourf(self):
a = self.darray[:, :, 0]
a.coords["dim_1"] = [2, 1, 89]
assert self.contourf_called(a.plot.contourf)
def test2d_1d_2d_coordinates_contourf(self):
sz = (20, 10)
depth = easy_array(sz)
a = DataArray(
easy_array(sz),
dims=["z", "time"],
coords={"depth": (["z", "time"], depth), "time": np.linspace(0, 1, sz[1])},
)
a.plot.contourf(x="time", y="depth")
def test3d(self):
self.darray.plot()
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot)
def test__infer_interval_breaks(self):
assert_array_equal([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1]))
assert_array_equal(
[-0.5, 0.5, 5.0, 9.5, 10.5], _infer_interval_breaks([0, 1, 9, 10])
)
assert_array_equal(
pd.date_range("20000101", periods=4) - np.timedelta64(12, "h"),
_infer_interval_breaks(pd.date_range("20000101", periods=3)),
)
# make a bounded 2D array that we will center and re-infer
xref, yref = np.meshgrid(np.arange(6), np.arange(5))
cx = (xref[1:, 1:] + xref[:-1, :-1]) / 2
cy = (yref[1:, 1:] + yref[:-1, :-1]) / 2
x = _infer_interval_breaks(cx, axis=1)
x = _infer_interval_breaks(x, axis=0)
y = _infer_interval_breaks(cy, axis=1)
y = _infer_interval_breaks(y, axis=0)
np.testing.assert_allclose(xref, x)
np.testing.assert_allclose(yref, y)
# test that ValueError is raised for non-monotonic 1D inputs
with pytest.raises(ValueError):
_infer_interval_breaks(np.array([0, 2, 1]), check_monotonic=True)
def test_geo_data(self):
# Regression test for gh2250
# Realistic coordinates taken from the example dataset
lat = np.array(
[
[16.28, 18.48, 19.58, 19.54, 18.35],
[28.07, 30.52, 31.73, 31.68, 30.37],
[39.65, 42.27, 43.56, 43.51, 42.11],
[50.52, 53.22, 54.55, 54.50, 53.06],
]
)
lon = np.array(
[
[-126.13, -113.69, -100.92, -88.04, -75.29],
[-129.27, -115.62, -101.54, -87.32, -73.26],
[-133.10, -118.00, -102.31, -86.42, -70.76],
[-137.85, -120.99, -103.28, -85.28, -67.62],
]
)
data = np.sqrt(lon ** 2 + lat ** 2)
da = DataArray(
data,
dims=("y", "x"),
coords={"lon": (("y", "x"), lon), "lat": (("y", "x"), lat)},
)
da.plot(x="lon", y="lat")
ax = plt.gca()
assert ax.has_data()
da.plot(x="lat", y="lon")
ax = plt.gca()
assert ax.has_data()
def test_datetime_dimension(self):
nrow = 3
ncol = 4
time = pd.date_range("2000-01-01", periods=nrow)
a = DataArray(
easy_array((nrow, ncol)), coords=[("time", time), ("y", range(ncol))]
)
a.plot()
ax = plt.gca()
assert ax.has_data()
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
d.coords["z"] = list("abcd")
g = d.plot(x="x", y="y", col="z", col_wrap=2, cmap="cool")
assert_array_equal(g.axes.shape, [2, 2])
for ax in g.axes.flat:
assert ax.has_data()
with raises_regex(ValueError, "[Ff]acet"):
d.plot(x="x", y="y", col="z", ax=plt.gca())
with raises_regex(ValueError, "[Ff]acet"):
d[0].plot(x="x", y="y", col="z", ax=plt.gca())
@pytest.mark.slow
def test_subplot_kws(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
d.coords["z"] = list("abcd")
g = d.plot(
x="x",
y="y",
col="z",
col_wrap=2,
cmap="cool",
subplot_kws=dict(facecolor="r"),
)
for ax in g.axes.flat:
# mpl V2
assert ax.get_facecolor()[0:3] == mpl.colors.to_rgb("r")
@pytest.mark.slow
def test_plot_size(self):
self.darray[:, 0, 0].plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(size=5)
assert plt.gcf().get_size_inches()[1] == 5
self.darray.plot(size=5, aspect=2)
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
with raises_regex(ValueError, "cannot provide both"):
self.darray.plot(ax=plt.gca(), figsize=(3, 4))
with raises_regex(ValueError, "cannot provide both"):
self.darray.plot(size=5, figsize=(3, 4))
with raises_regex(ValueError, "cannot provide both"):
self.darray.plot(size=5, ax=plt.gca())
with raises_regex(ValueError, "cannot provide `aspect`"):
self.darray.plot(aspect=1)
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
g = d.plot(x="x", y="y", col="columns", row="rows")
assert_array_equal(g.axes.shape, [3, 2])
for ax in g.axes.flat:
assert ax.has_data()
with raises_regex(ValueError, "[Ff]acet"):
d.plot(x="x", y="y", col="columns", ax=plt.gca())
def test_coord_with_interval(self):
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot()
class TestPlot1D(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
d = [0, 1.1, 0, 2]
self.darray = DataArray(d, coords={"period": range(len(d))}, dims="period")
self.darray.period.attrs["units"] = "s"
def test_xlabel_is_index_name(self):
self.darray.plot()
assert "period [s]" == plt.gca().get_xlabel()
def test_no_label_name_on_x_axis(self):
self.darray.plot(y="period")
assert "" == plt.gca().get_xlabel()
def test_no_label_name_on_y_axis(self):
self.darray.plot()
assert "" == plt.gca().get_ylabel()
def test_ylabel_is_data_name(self):
self.darray.name = "temperature"
self.darray.attrs["units"] = "degrees_Celsius"
self.darray.plot()
assert "temperature [degrees_Celsius]" == plt.gca().get_ylabel()
def test_xlabel_is_data_name(self):
self.darray.name = "temperature"
self.darray.attrs["units"] = "degrees_Celsius"
self.darray.plot(y="period")
assert "temperature [degrees_Celsius]" == plt.gca().get_xlabel()
def test_format_string(self):
self.darray.plot.line("ro")
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.line)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray([1, 2, 3], {"letter": ["a", "b", "c"]}, dims="letter")
with raises_regex(TypeError, r"[Pp]lot"):
a.plot.line()
def test_primitive_returned(self):
p = self.darray.plot.line()
assert isinstance(p[0], mpl.lines.Line2D)
@pytest.mark.slow
def test_plot_nans(self):
self.darray[1] = np.nan
self.darray.plot.line()
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range("2000-01-01", "2000-01-10")
a = DataArray(np.arange(len(time)), [("t", time)])
a.plot.line()
rotation = plt.gca().get_xticklabels()[0].get_rotation()
assert rotation != 0
def test_xyincrease_false_changes_axes(self):
self.darray.plot.line(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[1] - xlim[0], ylim[1] - ylim[0]
assert all(x < 0 for x in diffs)
def test_slice_in_title(self):
self.darray.coords["d"] = 10
self.darray.plot.line()
title = plt.gca().get_title()
assert "d = 10" == title
class TestPlotStep(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_step(self):
self.darray[0, 0].plot.step()
def test_coord_with_interval_step(self):
bins = [-1, 0, 1, 2]
self.darray.groupby_bins("dim_0", bins).mean(...).plot.step()
assert len(plt.gca().lines[0].get_xdata()) == ((len(bins) - 1) * 2)
class TestPlotHistogram(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_3d_array(self):
self.darray.plot.hist()
def test_xlabel_uses_name(self):
self.darray.name = "testpoints"
self.darray.attrs["units"] = "testunits"
self.darray.plot.hist()
assert "testpoints [testunits]" == plt.gca().get_xlabel()
def test_title_is_histogram(self):
self.darray.plot.hist()
assert "Histogram" == plt.gca().get_title()
def test_can_pass_in_kwargs(self):
nbins = 5
self.darray.plot.hist(bins=nbins)
assert nbins == len(plt.gca().patches)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.hist)
def test_primitive_returned(self):
h = self.darray.plot.hist()
assert isinstance(h[-1][0], mpl.patches.Rectangle)
@pytest.mark.slow
def test_plot_nans(self):
self.darray[0, 0, 0] = np.nan
self.darray.plot.hist()
def test_hist_coord_with_interval(self):
(
self.darray.groupby_bins("dim_0", [-1, 0, 1, 2])
.mean(...)
.plot.hist(range=(-1, 2))
)
@requires_matplotlib
class TestDetermineCmapParams:
@pytest.fixture(autouse=True)
def setUp(self):
self.data = np.linspace(0, 1, num=100)
def test_robust(self):
cmap_params = _determine_cmap_params(self.data, robust=True)
assert cmap_params["vmin"] == np.percentile(self.data, 2)
assert cmap_params["vmax"] == np.percentile(self.data, 98)
assert cmap_params["cmap"] == "viridis"
assert cmap_params["extend"] == "both"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_center(self):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["vmax"] - 0.5 == 0.5 - cmap_params["vmin"]
assert cmap_params["cmap"] == "RdBu_r"
assert cmap_params["extend"] == "neither"
assert cmap_params["levels"] is None
assert cmap_params["norm"] is None
def test_cmap_sequential_option(self):
with xr.set_options(cmap_sequential="magma"):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == "magma"
def test_cmap_sequential_explicit_option(self):
with xr.set_options(cmap_sequential=mpl.cm.magma):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params["cmap"] == mpl.cm.magma
def test_cmap_divergent_option(self):
with xr.set_options(cmap_divergent="magma"):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params["cmap"] == "magma"
def test_nan_inf_are_ignored(self):
cmap_params1 = _determine_cmap_params(self.data)
data = self.data
data[50:55] = np.nan
data[56:60] = np.inf
cmap_params2 = _determine_cmap_params(data)
assert cmap_params1["vmin"] == cmap_params2["vmin"]
assert cmap_params1["vmax"] == cmap_params2["vmax"]
@pytest.mark.slow
def test_integer_levels(self):
data = self.data + 1
# default is to cover full data range but with no guarantee on Nlevels
for level in np.arange(2, 10, dtype=int):
cmap_params = _determine_cmap_params(data, levels=level)
assert cmap_params["vmin"] == cmap_params["levels"][0]
assert cmap_params["vmax"] == cmap_params["levels"][-1]
assert cmap_params["extend"] == "neither"
# with min max we are more strict
cmap_params = _determine_cmap_params(
data, levels=5, vmin=0, vmax=5, cmap="Blues"
)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 5
assert cmap_params["vmin"] == cmap_params["levels"][0]
assert cmap_params["vmax"] == cmap_params["levels"][-1]
assert cmap_params["cmap"].name == "Blues"
assert cmap_params["extend"] == "neither"
assert cmap_params["cmap"].N == 4
assert cmap_params["norm"].N == 5
cmap_params = _determine_cmap_params(data, levels=5, vmin=0.5, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "max"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "min"
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.3, vmax=1.5)
assert cmap_params["cmap"].name == "viridis"
assert cmap_params["extend"] == "both"
def test_list_levels(self):
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(data, levels=orig_levels, vmin=0, vmax=3)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 5
assert cmap_params["cmap"].N == 5
assert cmap_params["norm"].N == 6
for wrap_levels in [list, np.array, pd.Index, DataArray]:
cmap_params = _determine_cmap_params(data, levels=wrap_levels(orig_levels))
assert_array_equal(cmap_params["levels"], orig_levels)
def test_divergentcontrol(self):
neg = self.data - 0.1
pos = self.data
# Default with positive data will be a normal cmap
cmap_params = _determine_cmap_params(pos)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 1
assert cmap_params["cmap"] == "viridis"
# Default with negative data will be a divergent cmap
cmap_params = _determine_cmap_params(neg)
assert cmap_params["vmin"] == -0.9
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "RdBu_r"
# Setting vmin or vmax should prevent this only if center is false
cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "viridis"
cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "viridis"
# Setting center=False too
cmap_params = _determine_cmap_params(neg, center=False)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "viridis"
# However, I should still be able to set center and have a div cmap
cmap_params = _determine_cmap_params(neg, center=0)
assert cmap_params["vmin"] == -0.9
assert cmap_params["vmax"] == 0.9
assert cmap_params["cmap"] == "RdBu_r"
# Setting vmin or vmax alone will force symmetric bounds around center
cmap_params = _determine_cmap_params(neg, vmin=-0.1)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.1
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.5)
assert cmap_params["vmin"] == -0.5
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1)
assert cmap_params["vmin"] == -0.4
assert cmap_params["vmax"] == 0.6
assert cmap_params["cmap"] == "RdBu_r"
# But this is only true if vmin or vmax are negative
cmap_params = _determine_cmap_params(pos, vmin=-0.1)
assert cmap_params["vmin"] == -0.1
assert cmap_params["vmax"] == 0.1
assert cmap_params["cmap"] == "RdBu_r"
cmap_params = _determine_cmap_params(pos, vmin=0.1)
assert cmap_params["vmin"] == 0.1
assert cmap_params["vmax"] == 1
assert cmap_params["cmap"] == "viridis"
cmap_params = _determine_cmap_params(pos, vmax=0.5)
assert cmap_params["vmin"] == 0
assert cmap_params["vmax"] == 0.5
assert cmap_params["cmap"] == "viridis"
# If both vmin and vmax are provided, output is non-divergent
cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6)
assert cmap_params["vmin"] == -0.2
assert cmap_params["vmax"] == 0.6
assert cmap_params["cmap"] == "viridis"
def test_norm_sets_vmin_vmax(self):
vmin = self.data.min()
vmax = self.data.max()
for norm, extend in zip(
[
mpl.colors.LogNorm(),
mpl.colors.LogNorm(vmin + 1, vmax - 1),
mpl.colors.LogNorm(None, vmax - 1),
mpl.colors.LogNorm(vmin + 1, None),
],
["neither", "both", "max", "min"],
):
test_min = vmin if norm.vmin is None else norm.vmin
test_max = vmax if norm.vmax is None else norm.vmax
cmap_params = _determine_cmap_params(self.data, norm=norm)
assert cmap_params["vmin"] == test_min
assert cmap_params["vmax"] == test_max
assert cmap_params["extend"] == extend
assert cmap_params["norm"] == norm
@requires_matplotlib
class TestDiscreteColorMap:
@pytest.fixture(autouse=True)
def setUp(self):
x = np.arange(start=0, stop=10, step=2)
y = np.arange(start=9, stop=-7, step=-3)
xy = np.dstack(np.meshgrid(x, y))
distance = np.linalg.norm(xy, axis=2)
self.darray = DataArray(distance, list(zip(("y", "x"), (y, x))))
self.data_min = distance.min()
self.data_max = distance.max()
@pytest.mark.slow
def test_recover_from_seaborn_jet_exception(self):
pal = _color_palette("jet", 4)
assert type(pal) == np.ndarray
assert len(pal) == 4
@pytest.mark.slow
def test_build_discrete_cmap(self):
for (cmap, levels, extend, filled) in [
("jet", [0, 1], "both", False),
("hot", [-4, 4], "max", True),
]:
ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
assert ncmap.N == len(levels) - 1
assert len(ncmap.colors) == len(levels) - 1
assert cnorm.N == len(levels)
assert_array_equal(cnorm.boundaries, levels)
assert max(levels) == cnorm.vmax
assert min(levels) == cnorm.vmin
if filled:
assert ncmap.colorbar_extend == extend
else:
assert ncmap.colorbar_extend == "max"
@pytest.mark.slow
def test_discrete_colormap_list_of_levels(self):
for extend, levels in [
("max", [-1, 2, 4, 8, 10]),
("both", [2, 5, 10, 11]),
("neither", [0, 5, 10, 15]),
("min", [2, 5, 10, 15]),
]:
for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
primitive = getattr(self.darray.plot, kind)(levels=levels)
assert_array_equal(levels, primitive.norm.boundaries)
assert max(levels) == primitive.norm.vmax
assert min(levels) == primitive.norm.vmin
if kind != "contour":
assert extend == primitive.cmap.colorbar_extend
else:
assert "max" == primitive.cmap.colorbar_extend
assert len(levels) - 1 == len(primitive.cmap.colors)
@pytest.mark.slow
def test_discrete_colormap_int_levels(self):
for extend, levels, vmin, vmax, cmap in [
("neither", 7, None, None, None),
("neither", 7, None, 20, mpl.cm.RdBu),
("both", 7, 4, 8, None),
("min", 10, 4, 15, None),
]:
for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
primitive = getattr(self.darray.plot, kind)(
levels=levels, vmin=vmin, vmax=vmax, cmap=cmap
)
assert levels >= len(primitive.norm.boundaries) - 1
if vmax is None:
assert primitive.norm.vmax >= self.data_max
else:
assert primitive.norm.vmax >= vmax
if vmin is None:
assert primitive.norm.vmin <= self.data_min
else:
assert primitive.norm.vmin <= vmin
if kind != "contour":
assert extend == primitive.cmap.colorbar_extend
else:
assert "max" == primitive.cmap.colorbar_extend
assert levels >= len(primitive.cmap.colors)
def test_discrete_colormap_list_levels_and_vmin_or_vmax(self):
levels = [0, 5, 10, 15]
primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20)
assert primitive.norm.vmax == max(levels)
assert primitive.norm.vmin == min(levels)
def test_discrete_colormap_provided_boundary_norm(self):
norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4)
primitive = self.darray.plot.contourf(norm=norm)
np.testing.assert_allclose(primitive.levels, norm.boundaries)
class Common2dMixin:
"""
Common tests for 2d plotting go here.
These tests assume that a staticmethod for `self.plotfunc` exists.
Should have the same name as the method.
"""
@pytest.fixture(autouse=True)
def setUp(self):
da = DataArray(
easy_array((10, 15), start=-1),
dims=["y", "x"],
coords={"y": np.arange(10), "x": np.arange(15)},
)
# add 2d coords
ds = da.to_dataset(name="testvar")
x, y = np.meshgrid(da.x.values, da.y.values)
ds["x2d"] = DataArray(x, dims=["y", "x"])
ds["y2d"] = DataArray(y, dims=["y", "x"])
ds = ds.set_coords(["x2d", "y2d"])
# set darray and plot method
self.darray = ds.testvar
# Add CF-compliant metadata
self.darray.attrs["long_name"] = "a_long_name"
self.darray.attrs["units"] = "a_units"
self.darray.x.attrs["long_name"] = "x_long_name"
self.darray.x.attrs["units"] = "x_units"
self.darray.y.attrs["long_name"] = "y_long_name"
self.darray.y.attrs["units"] = "y_units"
self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__)
def test_label_names(self):
self.plotmethod()
assert "x_long_name [x_units]" == plt.gca().get_xlabel()
assert "y_long_name [y_units]" == plt.gca().get_ylabel()
def test_1d_raises_valueerror(self):
with raises_regex(ValueError, r"DataArray must be 2d"):
self.plotfunc(self.darray[0, :])
def test_3d_raises_valueerror(self):
a = DataArray(easy_array((2, 3, 4)))
if self.plotfunc.__name__ == "imshow":
pytest.skip()
with raises_regex(ValueError, r"DataArray must be 2d"):
self.plotfunc(a)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray(easy_array((3, 2)), coords=[["a", "b", "c"], ["d", "e"]])
with raises_regex(TypeError, r"[Pp]lot"):
self.plotfunc(a)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.plotmethod)
def test_xyincrease_defaults(self):
# With default settings the axis must be ordered regardless
# of the coords order.
self.plotfunc(DataArray(easy_array((3, 2)), coords=[[1, 2, 3], [1, 2]]))
bounds = plt.gca().get_ylim()
assert bounds[0] < bounds[1]
bounds = plt.gca().get_xlim()
assert bounds[0] < bounds[1]
# Inverted coords
self.plotfunc(DataArray(easy_array((3, 2)), coords=[[3, 2, 1], [2, 1]]))
bounds = plt.gca().get_ylim()
assert bounds[0] < bounds[1]
bounds = plt.gca().get_xlim()
assert bounds[0] < bounds[1]
def test_xyincrease_false_changes_axes(self):
self.plotmethod(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0
assert all(abs(x) < 1 for x in diffs)
def test_xyincrease_true_changes_axes(self):
self.plotmethod(xincrease=True, yincrease=True)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9
assert all(abs(x) < 1 for x in diffs)
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range("2000-01-01", "2000-01-10")
a = DataArray(np.random.randn(2, len(time)), [("xx", [1, 2]), ("t", time)])
a.plot(x="t")
rotation = plt.gca().get_xticklabels()[0].get_rotation()
assert rotation != 0
def test_plot_nans(self):
x1 = self.darray[:5]
x2 = self.darray.copy()
x2[5:] = np.nan
clim1 = self.plotfunc(x1).get_clim()
clim2 = self.plotfunc(x2).get_clim()
assert clim1 == clim2
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.filterwarnings("ignore:invalid value encountered")
def test_can_plot_all_nans(self):
# regression test for issue #1780
self.plotfunc(DataArray(np.full((2, 2), np.nan)))
@pytest.mark.filterwarnings("ignore: Attempting to set")
def test_can_plot_axis_size_one(self):
if self.plotfunc.__name__ not in ("contour", "contourf"):
self.plotfunc(DataArray(np.ones((1, 1))))
def test_disallows_rgb_arg(self):
with pytest.raises(ValueError):
            # Invalid for most plots; invalid for imshow only with 2D data.
self.plotfunc(DataArray(np.ones((2, 2))), rgb="not None")
def test_viridis_cmap(self):
cmap_name = self.plotmethod(cmap="viridis").get_cmap().name
assert "viridis" == cmap_name
def test_default_cmap(self):
cmap_name = self.plotmethod().get_cmap().name
assert "RdBu_r" == cmap_name
cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name
assert "viridis" == cmap_name
@requires_seaborn
def test_seaborn_palette_as_cmap(self):
cmap_name = self.plotmethod(levels=2, cmap="husl").get_cmap().name
assert "husl" == cmap_name
def test_can_change_default_cmap(self):
cmap_name = self.plotmethod(cmap="Blues").get_cmap().name
assert "Blues" == cmap_name
def test_diverging_color_limits(self):
artist = self.plotmethod()
vmin, vmax = artist.get_clim()
assert round(abs(-vmin - vmax), 7) == 0
def test_xy_strings(self):
self.plotmethod("y", "x")
ax = plt.gca()
assert "y_long_name [y_units]" == ax.get_xlabel()
assert "x_long_name [x_units]" == ax.get_ylabel()
def test_positional_coord_string(self):
self.plotmethod(y="x")
ax = plt.gca()
assert "x_long_name [x_units]" == ax.get_ylabel()
assert "y_long_name [y_units]" == ax.get_xlabel()
self.plotmethod(x="x")
ax = plt.gca()
assert "x_long_name [x_units]" == ax.get_xlabel()
assert "y_long_name [y_units]" == ax.get_ylabel()
def test_bad_x_string_exception(self):
with raises_regex(ValueError, "x and y must be coordinate variables"):
self.plotmethod("not_a_real_dim", "y")
with raises_regex(
ValueError, "x must be a dimension name if y is not supplied"
):
self.plotmethod(x="not_a_real_dim")
with raises_regex(
ValueError, "y must be a dimension name if x is not supplied"
):
self.plotmethod(y="not_a_real_dim")
self.darray.coords["z"] = 100
def test_coord_strings(self):
# 1d coords (same as dims)
assert {"x", "y"} == set(self.darray.dims)
self.plotmethod(y="y", x="x")
def test_non_linked_coords(self):
# plot with coordinate names that are not dimensions
self.darray.coords["newy"] = self.darray.y + 150
# Normal case, without transpose
self.plotfunc(self.darray, x="x", y="newy")
ax = plt.gca()
assert "x_long_name [x_units]" == ax.get_xlabel()
assert "newy" == ax.get_ylabel()
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
assert np.min(ax.get_ylim()) > 100.0
def test_non_linked_coords_transpose(self):
# plot with coordinate names that are not dimensions,
# and with transposed y and x axes
# This used to raise an error with pcolormesh and contour
# https://github.com/pydata/xarray/issues/788
self.darray.coords["newy"] = self.darray.y + 150
self.plotfunc(self.darray, x="newy", y="x")
ax = plt.gca()
assert "newy" == ax.get_xlabel()
assert "x_long_name [x_units]" == ax.get_ylabel()
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
assert np.min(ax.get_xlim()) > 100.0
def test_default_title(self):
a = DataArray(easy_array((4, 3, 2)), dims=["a", "b", "c"])
a.coords["c"] = [0, 1]
a.coords["d"] = "foo"
self.plotfunc(a.isel(c=1))
title = plt.gca().get_title()
assert "c = 1, d = foo" == title or "d = foo, c = 1" == title
def test_colorbar_default_label(self):
self.plotmethod(add_colorbar=True)
assert "a_long_name [a_units]" in text_in_fig()
def test_no_labels(self):
self.darray.name = "testvar"
self.darray.attrs["units"] = "test_units"
self.plotmethod(add_labels=False)
alltxt = text_in_fig()
for string in [
"x_long_name [x_units]",
"y_long_name [y_units]",
"testvar [test_units]",
]:
assert string not in alltxt
def test_colorbar_kwargs(self):
# replace label
self.darray.attrs.pop("long_name")
self.darray.attrs["units"] = "test_units"
# check default colorbar label
self.plotmethod(add_colorbar=True)
alltxt = text_in_fig()
assert "testvar [test_units]" in alltxt
self.darray.attrs.pop("units")
self.darray.name = "testvar"
self.plotmethod(add_colorbar=True, cbar_kwargs={"label": "MyLabel"})
alltxt = text_in_fig()
assert "MyLabel" in alltxt
assert "testvar" not in alltxt
# you can use anything accepted by the dict constructor as well
self.plotmethod(add_colorbar=True, cbar_kwargs=(("label", "MyLabel"),))
alltxt = text_in_fig()
assert "MyLabel" in alltxt
assert "testvar" not in alltxt
# change cbar ax
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(
ax=ax, cbar_ax=cax, add_colorbar=True, cbar_kwargs={"label": "MyBar"}
)
assert ax.has_data()
assert cax.has_data()
alltxt = text_in_fig()
assert "MyBar" in alltxt
assert "testvar" not in alltxt
# note that there are two ways to achieve this
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(
ax=ax, add_colorbar=True, cbar_kwargs={"label": "MyBar", "cax": cax}
)
assert ax.has_data()
assert cax.has_data()
alltxt = text_in_fig()
assert "MyBar" in alltxt
assert "testvar" not in alltxt
        # check that add_colorbar=False is respected (no colorbar is drawn)
self.plotmethod(add_colorbar=False)
assert "testvar" not in text_in_fig()
# check that error is raised
pytest.raises(
ValueError,
self.plotmethod,
add_colorbar=False,
cbar_kwargs={"label": "label"},
)
def test_verbose_facetgrid(self):
a = easy_array((10, 15, 3))
d = DataArray(a, dims=["y", "x", "z"])
g = xplt.FacetGrid(d, col="z")
g.map_dataarray(self.plotfunc, "x", "y")
for ax in g.axes.flat:
assert ax.has_data()
def test_2d_function_and_method_signature_same(self):
func_sig = inspect.getcallargs(self.plotfunc, self.darray)
method_sig = inspect.getcallargs(self.plotmethod)
del method_sig["_PlotMethods_obj"]
del func_sig["darray"]
assert func_sig == method_sig
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"])
g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2)
assert_array_equal(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
assert ax.has_data()
if x == 0:
assert "y" == ax.get_ylabel()
else:
assert "" == ax.get_ylabel()
if y == 1:
assert "x" == ax.get_xlabel()
else:
assert "" == ax.get_xlabel()
        # Inferring labels
g = self.plotfunc(d, col="z", col_wrap=2)
assert_array_equal(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
assert ax.has_data()
if x == 0:
assert "y" == ax.get_ylabel()
else:
assert "" == ax.get_ylabel()
if y == 1:
assert "x" == ax.get_xlabel()
else:
assert "" == ax.get_xlabel()
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
g = self.plotfunc(d, x="x", y="y", col="columns", row="rows")
assert_array_equal(g.axes.shape, [3, 2])
for ax in g.axes.flat:
assert ax.has_data()
@pytest.mark.filterwarnings("ignore:This figure includes")
def test_facetgrid_map_only_appends_mappables(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
g = self.plotfunc(d, x="x", y="y", col="columns", row="rows")
expected = g._mappables
g.map(lambda: plt.plot(1, 1))
actual = g._mappables
assert expected == actual
def test_facetgrid_cmap(self):
# Regression test for GH592
data = np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12)
d = DataArray(data, dims=["x", "y", "time"])
fg = d.plot.pcolormesh(col="time")
# check that all color limits are the same
assert len({m.get_clim() for m in fg._mappables}) == 1
# check that all colormaps are the same
assert len({m.get_cmap().name for m in fg._mappables}) == 1
def test_facetgrid_cbar_kwargs(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
g = self.plotfunc(
d,
x="x",
y="y",
col="columns",
row="rows",
cbar_kwargs={"label": "test_label"},
)
# catch contour case
if hasattr(g, "cbar"):
assert g.cbar._label == "test_label"
def test_facetgrid_no_cbar_ax(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=["y", "x", "columns", "rows"])
with pytest.raises(ValueError):
self.plotfunc(d, x="x", y="y", col="columns", row="rows", cbar_ax=1)
def test_cmap_and_color_both(self):
with pytest.raises(ValueError):
self.plotmethod(colors="k", cmap="RdBu")
def test_2d_coord_with_interval(self):
for dim in self.darray.dims:
gp = self.darray.groupby_bins(dim, range(15), restore_coord_dims=True).mean(
dim
)
for kind in ["imshow", "pcolormesh", "contourf", "contour"]:
getattr(gp.plot, kind)()
def test_colormap_error_norm_and_vmin_vmax(self):
norm = mpl.colors.LogNorm(0.1, 1e1)
with pytest.raises(ValueError):
self.darray.plot(norm=norm, vmin=2)
with pytest.raises(ValueError):
self.darray.plot(norm=norm, vmax=2)
@pytest.mark.slow
class TestContourf(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contourf)
@pytest.mark.slow
def test_contourf_called(self):
# Having both statements ensures the test works properly
assert not self.contourf_called(self.darray.plot.imshow)
assert self.contourf_called(self.darray.plot.contourf)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.contour.QuadContourSet)
@pytest.mark.slow
def test_extend(self):
artist = self.plotmethod()
assert artist.extend == "neither"
self.darray[0, 0] = -100
self.darray[-1, -1] = 100
artist = self.plotmethod(robust=True)
assert artist.extend == "both"
self.darray[0, 0] = 0
self.darray[-1, -1] = 0
artist = self.plotmethod(vmin=-0, vmax=10)
assert artist.extend == "min"
artist = self.plotmethod(vmin=-10, vmax=0)
assert artist.extend == "max"
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x="x2d", y="y2d")
# make sure labels came out ok
ax = plt.gca()
assert "x2d" == ax.get_xlabel()
assert "y2d" == ax.get_ylabel()
@pytest.mark.slow
def test_levels(self):
artist = self.plotmethod(levels=[-0.5, -0.4, 0.1])
assert artist.extend == "both"
artist = self.plotmethod(levels=3)
assert artist.extend == "neither"
@pytest.mark.slow
class TestContour(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contour)
# matplotlib cmap.colors gives an rgbA ndarray
# when seaborn is used, instead we get an rgb tuple
@staticmethod
def _color_as_tuple(c):
return tuple(c[:3])
def test_colors(self):
# with single color, we don't want rgb array
artist = self.plotmethod(colors="k")
assert artist.cmap.colors[0] == "k"
artist = self.plotmethod(colors=["k", "b"])
assert self._color_as_tuple(artist.cmap.colors[1]) == (0.0, 0.0, 1.0)
artist = self.darray.plot.contour(
levels=[-0.5, 0.0, 0.5, 1.0], colors=["k", "r", "w", "b"]
)
assert self._color_as_tuple(artist.cmap.colors[1]) == (1.0, 0.0, 0.0)
assert self._color_as_tuple(artist.cmap.colors[2]) == (1.0, 1.0, 1.0)
# the last color is now under "over"
assert self._color_as_tuple(artist.cmap._rgba_over) == (0.0, 0.0, 1.0)
def test_colors_np_levels(self):
# https://github.com/pydata/xarray/issues/3284
levels = np.array([-0.5, 0.0, 0.5, 1.0])
artist = self.darray.plot.contour(levels=levels, colors=["k", "r", "w", "b"])
assert self._color_as_tuple(artist.cmap.colors[1]) == (1.0, 0.0, 0.0)
assert self._color_as_tuple(artist.cmap.colors[2]) == (1.0, 1.0, 1.0)
# the last color is now under "over"
assert self._color_as_tuple(artist.cmap._rgba_over) == (0.0, 0.0, 1.0)
def test_cmap_and_color_both(self):
with pytest.raises(ValueError):
self.plotmethod(colors="k", cmap="RdBu")
def list_of_colors_in_cmap_raises_error(self):
with raises_regex(ValueError, "list of colors"):
self.plotmethod(cmap=["k", "b"])
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x="x2d", y="y2d")
# make sure labels came out ok
ax = plt.gca()
assert "x2d" == ax.get_xlabel()
assert "y2d" == ax.get_ylabel()
def test_single_level(self):
# this used to raise an error, but not anymore since
# add_colorbar defaults to false
self.plotmethod(levels=[0.1])
self.plotmethod(levels=1)
class TestPcolormesh(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.pcolormesh)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.collections.QuadMesh)
def test_everything_plotted(self):
artist = self.plotmethod()
assert artist.get_array().size == self.darray.size
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x="x2d", y="y2d")
# make sure labels came out ok
ax = plt.gca()
assert "x2d" == ax.get_xlabel()
assert "y2d" == ax.get_ylabel()
def test_dont_infer_interval_breaks_for_cartopy(self):
# Regression for GH 781
ax = plt.gca()
# Simulate a Cartopy Axis
setattr(ax, "projection", True)
artist = self.plotmethod(x="x2d", y="y2d", ax=ax)
assert isinstance(artist, mpl.collections.QuadMesh)
# Let cartopy handle the axis limits and artist size
assert artist.get_array().size <= self.darray.size
@pytest.mark.slow
class TestImshow(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.imshow)
@pytest.mark.slow
def test_imshow_called(self):
# Having both statements ensures the test works properly
assert not self.imshow_called(self.darray.plot.contourf)
assert self.imshow_called(self.darray.plot.imshow)
def test_xy_pixel_centered(self):
self.darray.plot.imshow(yincrease=False)
assert np.allclose([-0.5, 14.5], plt.gca().get_xlim())
assert np.allclose([9.5, -0.5], plt.gca().get_ylim())
def test_default_aspect_is_auto(self):
self.darray.plot.imshow()
assert "auto" == plt.gca().get_aspect()
@pytest.mark.slow
def test_cannot_change_mpl_aspect(self):
with raises_regex(ValueError, "not available in xarray"):
self.darray.plot.imshow(aspect="equal")
# with numbers we fall back to fig control
self.darray.plot.imshow(size=5, aspect=2)
assert "auto" == plt.gca().get_aspect()
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
@pytest.mark.slow
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.image.AxesImage)
@pytest.mark.slow
@requires_seaborn
def test_seaborn_palette_needs_levels(self):
with pytest.raises(ValueError):
self.plotmethod(cmap="husl")
def test_2d_coord_names(self):
with raises_regex(ValueError, "requires 1D coordinates"):
self.plotmethod(x="x2d", y="y2d")
def test_plot_rgb_image(self):
DataArray(
easy_array((10, 15, 3), start=0), dims=["y", "x", "band"]
).plot.imshow()
assert 0 == len(find_possible_colorbars())
def test_plot_rgb_image_explicit(self):
DataArray(
easy_array((10, 15, 3), start=0), dims=["y", "x", "band"]
).plot.imshow(y="y", x="x", rgb="band")
assert 0 == len(find_possible_colorbars())
def test_plot_rgb_faceted(self):
DataArray(
easy_array((2, 2, 10, 15, 3), start=0), dims=["a", "b", "y", "x", "band"]
).plot.imshow(row="a", col="b")
assert 0 == len(find_possible_colorbars())
def test_plot_rgba_image_transposed(self):
# We can handle the color axis being in any position
DataArray(
easy_array((4, 10, 15), start=0), dims=["band", "y", "x"]
).plot.imshow()
def test_warns_ambigious_dim(self):
arr = DataArray(easy_array((3, 3, 3)), dims=["y", "x", "band"])
with pytest.warns(UserWarning):
arr.plot.imshow()
# but doesn't warn if dimensions specified
arr.plot.imshow(rgb="band")
arr.plot.imshow(x="x", y="y")
def test_rgb_errors_too_many_dims(self):
arr = DataArray(easy_array((3, 3, 3, 3)), dims=["y", "x", "z", "band"])
with pytest.raises(ValueError):
arr.plot.imshow(rgb="band")
def test_rgb_errors_bad_dim_sizes(self):
arr = DataArray(easy_array((5, 5, 5)), dims=["y", "x", "band"])
with pytest.raises(ValueError):
arr.plot.imshow(rgb="band")
def test_normalize_rgb_imshow(self):
for kwargs in (
dict(vmin=-1),
dict(vmax=2),
dict(vmin=-1, vmax=1),
dict(vmin=0, vmax=0),
dict(vmin=0, robust=True),
dict(vmax=-1, robust=True),
):
da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
arr = da.plot.imshow(**kwargs).get_array()
assert 0 <= arr.min() <= arr.max() <= 1, kwargs
def test_normalize_rgb_one_arg_error(self):
da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
# If passed one bound that implies all out of range, error:
for kwargs in [dict(vmax=-1), dict(vmin=2)]:
with pytest.raises(ValueError):
da.plot.imshow(**kwargs)
# If passed two that's just moving the range, *not* an error:
for kwargs in [dict(vmax=-1, vmin=-1.2), dict(vmin=2, vmax=2.1)]:
da.plot.imshow(**kwargs)
def test_imshow_rgb_values_in_valid_range(self):
da = DataArray(np.arange(75, dtype="uint8").reshape((5, 5, 3)))
_, ax = plt.subplots()
out = da.plot.imshow(ax=ax).get_array()
assert out.dtype == np.uint8
assert (out[..., :3] == da.values).all() # Compare without added alpha
@pytest.mark.filterwarnings("ignore:Several dimensions of this array")
def test_regression_rgb_imshow_dim_size_one(self):
# Regression: https://github.com/pydata/xarray/issues/1966
da = DataArray(easy_array((1, 3, 3), start=0.0, stop=1.0))
da.plot.imshow()
def test_origin_overrides_xyincrease(self):
da = DataArray(easy_array((3, 2)), coords=[[-2, 0, 2], [-1, 1]])
da.plot.imshow(origin="upper")
assert plt.xlim()[0] < 0
assert plt.ylim()[1] < 0
plt.clf()
da.plot.imshow(origin="lower")
assert plt.xlim()[0] < 0
assert plt.ylim()[0] < 0
class TestFacetGrid(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
d = easy_array((10, 15, 3))
self.darray = DataArray(d, dims=["y", "x", "z"], coords={"z": ["a", "b", "c"]})
self.g = xplt.FacetGrid(self.darray, col="z")
@pytest.mark.slow
def test_no_args(self):
self.g.map_dataarray(xplt.contourf, "x", "y")
# Don't want colorbar labeled with 'None'
alltxt = text_in_fig()
assert "None" not in alltxt
for ax in self.g.axes.flat:
assert ax.has_data()
@pytest.mark.slow
def test_names_appear_somewhere(self):
self.darray.name = "testvar"
self.g.map_dataarray(xplt.contourf, "x", "y")
for k, ax in zip("abc", self.g.axes.flat):
assert f"z = {k}" == ax.get_title()
alltxt = text_in_fig()
assert self.darray.name in alltxt
for label in ["x", "y"]:
assert label in alltxt
@pytest.mark.slow
def test_text_not_super_long(self):
self.darray.coords["z"] = [100 * letter for letter in "abc"]
g = xplt.FacetGrid(self.darray, col="z")
g.map_dataarray(xplt.contour, "x", "y")
alltxt = text_in_fig()
maxlen = max(len(txt) for txt in alltxt)
assert maxlen < 50
t0 = g.axes[0, 0].get_title()
assert t0.endswith("...")
@pytest.mark.slow
def test_colorbar(self):
vmin = self.darray.values.min()
vmax = self.darray.values.max()
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, "x", "y")
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
assert np.allclose(expected, clim)
assert 1 == len(find_possible_colorbars())
@pytest.mark.slow
def test_empty_cell(self):
g = xplt.FacetGrid(self.darray, col="z", col_wrap=2)
g.map_dataarray(xplt.imshow, "x", "y")
bottomright = g.axes[-1, -1]
assert not bottomright.has_data()
assert not bottomright.get_visible()
@pytest.mark.slow
def test_norow_nocol_error(self):
with raises_regex(ValueError, r"[Rr]ow"):
xplt.FacetGrid(self.darray)
@pytest.mark.slow
def test_groups(self):
self.g.map_dataarray(xplt.imshow, "x", "y")
upperleft_dict = self.g.name_dicts[0, 0]
upperleft_array = self.darray.loc[upperleft_dict]
z0 = self.darray.isel(z=0)
assert_equal(upperleft_array, z0)
@pytest.mark.slow
def test_float_index(self):
self.darray.coords["z"] = [0.1, 0.2, 0.4]
g = xplt.FacetGrid(self.darray, col="z")
g.map_dataarray(xplt.imshow, "x", "y")
@pytest.mark.slow
def test_nonunique_index_error(self):
self.darray.coords["z"] = [0.1, 0.2, 0.2]
with raises_regex(ValueError, r"[Uu]nique"):
xplt.FacetGrid(self.darray, col="z")
@pytest.mark.slow
def test_robust(self):
z = np.zeros((20, 20, 2))
darray = DataArray(z, dims=["y", "x", "z"])
darray[:, :, 1] = 1
darray[2, 0, 0] = -1000
darray[3, 0, 0] = 1000
g = xplt.FacetGrid(darray, col="z")
g.map_dataarray(xplt.imshow, "x", "y", robust=True)
# Color limits should be 0, 1
# The largest number displayed in the figure should be less than 21
numbers = set()
alltxt = text_in_fig()
for txt in alltxt:
try:
numbers.add(float(txt))
except ValueError:
pass
largest = max(abs(x) for x in numbers)
assert largest < 21
@pytest.mark.slow
def test_can_set_vmin_vmax(self):
vmin, vmax = 50.0, 1000.0
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, "x", "y", vmin=vmin, vmax=vmax)
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
assert np.allclose(expected, clim)
@pytest.mark.slow
def test_can_set_norm(self):
norm = mpl.colors.SymLogNorm(0.1)
self.g.map_dataarray(xplt.imshow, "x", "y", norm=norm)
for image in plt.gcf().findobj(mpl.image.AxesImage):
assert image.norm is norm
@pytest.mark.slow
def test_figure_size(self):
assert_array_equal(self.g.fig.get_size_inches(), (10, 3))
g = xplt.FacetGrid(self.darray, col="z", size=6)
assert_array_equal(g.fig.get_size_inches(), (19, 6))
g = self.darray.plot.imshow(col="z", size=6)
assert_array_equal(g.fig.get_size_inches(), (19, 6))
g = xplt.FacetGrid(self.darray, col="z", size=4, aspect=0.5)
assert_array_equal(g.fig.get_size_inches(), (7, 4))
g = xplt.FacetGrid(self.darray, col="z", figsize=(9, 4))
assert_array_equal(g.fig.get_size_inches(), (9, 4))
with raises_regex(ValueError, "cannot provide both"):
g = xplt.plot(self.darray, row=2, col="z", figsize=(6, 4), size=6)
with raises_regex(ValueError, "Can't use"):
g = xplt.plot(self.darray, row=2, col="z", ax=plt.gca(), size=6)
@pytest.mark.slow
def test_num_ticks(self):
nticks = 99
maxticks = nticks + 1
self.g.map_dataarray(xplt.imshow, "x", "y")
self.g.set_ticks(max_xticks=nticks, max_yticks=nticks)
for ax in self.g.axes.flat:
xticks = len(ax.get_xticks())
yticks = len(ax.get_yticks())
assert xticks <= maxticks
assert yticks <= maxticks
assert xticks >= nticks / 2.0
assert yticks >= nticks / 2.0
@pytest.mark.slow
def test_map(self):
assert self.g._finalized is False
self.g.map(plt.contourf, "x", "y", Ellipsis)
assert self.g._finalized is True
self.g.map(lambda: None)
@pytest.mark.slow
def test_map_dataset(self):
g = xplt.FacetGrid(self.darray.to_dataset(name="foo"), col="z")
g.map(plt.contourf, "x", "y", "foo")
alltxt = text_in_fig()
for label in ["x", "y"]:
assert label in alltxt
# everything has a label
assert "None" not in alltxt
# colorbar can't be inferred automatically
assert "foo" not in alltxt
assert 0 == len(find_possible_colorbars())
g.add_colorbar(label="colors!")
assert "colors!" in text_in_fig()
assert 1 == len(find_possible_colorbars())
@pytest.mark.slow
def test_set_axis_labels(self):
g = self.g.map_dataarray(xplt.contourf, "x", "y")
g.set_axis_labels("longitude", "latitude")
alltxt = text_in_fig()
for label in ["longitude", "latitude"]:
assert label in alltxt
@pytest.mark.slow
def test_facetgrid_colorbar(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=["y", "x", "z"], name="foo")
d.plot.imshow(x="x", y="y", col="z")
assert 1 == len(find_possible_colorbars())
d.plot.imshow(x="x", y="y", col="z", add_colorbar=True)
assert 1 == len(find_possible_colorbars())
d.plot.imshow(x="x", y="y", col="z", add_colorbar=False)
assert 0 == len(find_possible_colorbars())
@pytest.mark.slow
def test_facetgrid_polar(self):
# test if polar projection in FacetGrid does not raise an exception
self.darray.plot.pcolormesh(
col="z", subplot_kws=dict(projection="polar"), sharex=False, sharey=False
)
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetGrid4d(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
a = easy_array((10, 15, 3, 2))
darray = DataArray(a, dims=["y", "x", "col", "row"])
darray.coords["col"] = np.array(
["col" + str(x) for x in darray.coords["col"].values]
)
darray.coords["row"] = np.array(
["row" + str(x) for x in darray.coords["row"].values]
)
self.darray = darray
@pytest.mark.slow
def test_default_labels(self):
g = xplt.FacetGrid(self.darray, col="col", row="row")
assert (2, 3) == g.axes.shape
g.map_dataarray(xplt.imshow, "x", "y")
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords["row"].values, g.axes[:, -1]):
assert substring_in_axes(label, ax)
# Top row should be labeled
for label, ax in zip(self.darray.coords["col"].values, g.axes[0, :]):
assert substring_in_axes(label, ax)
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetedLinePlotsLegend(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = xr.tutorial.scatter_example_dataset()
def test_legend_labels(self):
fg = self.darray.A.plot.line(col="x", row="w", hue="z")
all_legend_labels = [t.get_text() for t in fg.figlegend.texts]
# labels in legend should be ['0', '1', '2', '3']
assert sorted(all_legend_labels) == ["0", "1", "2", "3"]
@pytest.mark.filterwarnings("ignore:tight_layout cannot")
class TestFacetedLinePlots(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = DataArray(
np.random.randn(10, 6, 3, 4),
dims=["hue", "x", "col", "row"],
coords=[range(10), range(6), range(3), ["A", "B", "C", "C++"]],
name="Cornelius Ortega the 1st",
)
self.darray.hue.name = "huename"
self.darray.hue.attrs["units"] = "hunits"
self.darray.x.attrs["units"] = "xunits"
self.darray.col.attrs["units"] = "colunits"
self.darray.row.attrs["units"] = "rowunits"
def test_facetgrid_shape(self):
g = self.darray.plot(row="row", col="col", hue="hue")
assert g.axes.shape == (len(self.darray.row), len(self.darray.col))
g = self.darray.plot(row="col", col="row", hue="hue")
assert g.axes.shape == (len(self.darray.col), len(self.darray.row))
def test_unnamed_args(self):
g = self.darray.plot.line("o--", row="row", col="col", hue="hue")
lines = [
q for q in g.axes.flat[0].get_children() if isinstance(q, mpl.lines.Line2D)
]
# passing 'o--' as argument should set marker and linestyle
assert lines[0].get_marker() == "o"
assert lines[0].get_linestyle() == "--"
def test_default_labels(self):
g = self.darray.plot(row="row", col="col", hue="hue")
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords["row"].values, g.axes[:, -1]):
assert substring_in_axes(label, ax)
# Top row should be labeled
for label, ax in zip(self.darray.coords["col"].values, g.axes[0, :]):
assert substring_in_axes(str(label), ax)
# Leftmost column should have array name
for ax in g.axes[:, 0]:
assert substring_in_axes(self.darray.name, ax)
def test_test_empty_cell(self):
g = self.darray.isel(row=1).drop("row").plot(col="col", hue="hue", col_wrap=2)
bottomright = g.axes[-1, -1]
assert not bottomright.has_data()
assert not bottomright.get_visible()
def test_set_axis_labels(self):
g = self.darray.plot(row="row", col="col", hue="hue")
g.set_axis_labels("longitude", "latitude")
alltxt = text_in_fig()
assert "longitude" in alltxt
assert "latitude" in alltxt
def test_axes_in_faceted_plot(self):
with pytest.raises(ValueError):
self.darray.plot.line(row="row", col="col", x="x", ax=plt.axes())
def test_figsize_and_size(self):
with pytest.raises(ValueError):
self.darray.plot.line(row="row", col="col", x="x", size=3, figsize=4)
def test_wrong_num_of_dimensions(self):
with pytest.raises(ValueError):
self.darray.plot(row="row", hue="hue")
self.darray.plot.line(row="row", hue="hue")
@requires_matplotlib
class TestDatasetScatterPlots(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
das = [
DataArray(
np.random.randn(3, 3, 4, 4),
dims=["x", "row", "col", "hue"],
coords=[range(k) for k in [3, 3, 4, 4]],
)
for _ in [1, 2]
]
ds = Dataset({"A": das[0], "B": das[1]})
ds.hue.name = "huename"
ds.hue.attrs["units"] = "hunits"
ds.x.attrs["units"] = "xunits"
ds.col.attrs["units"] = "colunits"
ds.row.attrs["units"] = "rowunits"
ds.A.attrs["units"] = "Aunits"
ds.B.attrs["units"] = "Bunits"
self.ds = ds
@pytest.mark.parametrize(
"add_guide, hue_style, legend, colorbar",
[
(None, None, False, True),
(False, None, False, False),
(True, None, False, True),
(True, "continuous", False, True),
(False, "discrete", False, False),
(True, "discrete", True, False),
],
)
def test_add_guide(self, add_guide, hue_style, legend, colorbar):
meta_data = _infer_meta_data(
self.ds, x="A", y="B", hue="hue", hue_style=hue_style, add_guide=add_guide
)
assert meta_data["add_legend"] is legend
assert meta_data["add_colorbar"] is colorbar
def test_facetgrid_shape(self):
g = self.ds.plot.scatter(x="A", y="B", row="row", col="col")
assert g.axes.shape == (len(self.ds.row), len(self.ds.col))
g = self.ds.plot.scatter(x="A", y="B", row="col", col="row")
assert g.axes.shape == (len(self.ds.col), len(self.ds.row))
def test_default_labels(self):
g = self.ds.plot.scatter("A", "B", row="row", col="col", hue="hue")
# Top row should be labeled
for label, ax in zip(self.ds.coords["col"].values, g.axes[0, :]):
assert substring_in_axes(str(label), ax)
# Bottom row should have name of x array name and units
for ax in g.axes[-1, :]:
assert ax.get_xlabel() == "A [Aunits]"
# Leftmost column should have name of y array name and units
for ax in g.axes[:, 0]:
assert ax.get_ylabel() == "B [Bunits]"
def test_axes_in_faceted_plot(self):
with pytest.raises(ValueError):
self.ds.plot.scatter(x="A", y="B", row="row", ax=plt.axes())
def test_figsize_and_size(self):
with pytest.raises(ValueError):
self.ds.plot.scatter(x="A", y="B", row="row", size=3, figsize=4)
@pytest.mark.parametrize(
"x, y, hue_style, add_guide",
[
("A", "B", "something", True),
("A", "B", "discrete", True),
("A", "B", None, True),
("A", "The Spanish Inquisition", None, None),
("The Spanish Inquisition", "B", None, True),
],
)
def test_bad_args(self, x, y, hue_style, add_guide):
with pytest.raises(ValueError):
self.ds.plot.scatter(x, y, hue_style=hue_style, add_guide=add_guide)
@pytest.mark.xfail(reason="datetime,timedelta hue variable not supported.")
@pytest.mark.parametrize("hue_style", ["discrete", "continuous"])
def test_datetime_hue(self, hue_style):
ds2 = self.ds.copy()
ds2["hue"] = pd.date_range("2000-1-1", periods=4)
ds2.plot.scatter(x="A", y="B", hue="hue", hue_style=hue_style)
ds2["hue"] = pd.timedelta_range("-1D", periods=4, freq="D")
ds2.plot.scatter(x="A", y="B", hue="hue", hue_style=hue_style)
def test_facetgrid_hue_style(self):
# Can't move this to pytest.mark.parametrize because py36-bare-minimum
# doesn't have matplotlib.
for hue_style, map_type in (
("discrete", list),
("continuous", mpl.collections.PathCollection),
):
g = self.ds.plot.scatter(
x="A", y="B", row="row", col="col", hue="hue", hue_style=hue_style
)
# for 'discrete' a list is appended to _mappables
# for 'continuous', should be single PathCollection
assert isinstance(g._mappables[-1], map_type)
@pytest.mark.parametrize(
"x, y, hue, markersize", [("A", "B", "x", "col"), ("x", "row", "A", "B")]
)
def test_scatter(self, x, y, hue, markersize):
self.ds.plot.scatter(x, y, hue=hue, markersize=markersize)
def test_non_numeric_legend(self):
ds2 = self.ds.copy()
ds2["hue"] = ["a", "b", "c", "d"]
lines = ds2.plot.scatter(x="A", y="B", hue="hue")
# should make a discrete legend
assert lines[0].axes.legend_ is not None
# and raise an error if explicitly not allowed to do so
with pytest.raises(ValueError):
ds2.plot.scatter(x="A", y="B", hue="hue", hue_style="continuous")
def test_add_legend_by_default(self):
sc = self.ds.plot.scatter(x="A", y="B", hue="hue")
assert len(sc.figure.axes) == 2
class TestDatetimePlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
"""
Create a DataArray with a time-axis that contains datetime objects.
"""
month = np.arange(1, 13, 1)
data = np.sin(2 * np.pi * month / 12.0)
darray = DataArray(data, dims=["time"])
darray.coords["time"] = np.array([datetime(2017, m, 1) for m in month])
self.darray = darray
def test_datetime_line_plot(self):
# test if line plot raises no Exception
self.darray.plot.line()
@requires_nc_time_axis
@requires_cftime
class TestCFDatetimePlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
"""
Create a DataArray with a time-axis that contains cftime.datetime
objects.
"""
# case for 1d array
data = np.random.rand(4, 12)
time = xr.cftime_range(start="2017", periods=12, freq="1M", calendar="noleap")
darray = DataArray(data, dims=["x", "time"])
darray.coords["time"] = time
self.darray = darray
def test_cfdatetime_line_plot(self):
self.darray.isel(x=0).plot.line()
def test_cfdatetime_pcolormesh_plot(self):
self.darray.plot.pcolormesh()
def test_cfdatetime_contour_plot(self):
self.darray.plot.contour()
@requires_cftime
@pytest.mark.skipif(has_nc_time_axis, reason="nc_time_axis is installed")
class TestNcAxisNotInstalled(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
"""
Create a DataArray with a time-axis that contains cftime.datetime
objects.
"""
month = np.arange(1, 13, 1)
data = np.sin(2 * np.pi * month / 12.0)
darray = DataArray(data, dims=["time"])
darray.coords["time"] = xr.cftime_range(
start="2017", periods=12, freq="1M", calendar="noleap"
)
self.darray = darray
def test_ncaxis_notinstalled_line_plot(self):
with raises_regex(ImportError, "optional `nc-time-axis`"):
self.darray.plot.line()
@requires_seaborn
def test_import_seaborn_no_warning():
# GH1633
with pytest.warns(None) as record:
import_seaborn()
assert len(record) == 0
@requires_matplotlib
def test_plot_seaborn_no_import_warning():
# GH1633
with pytest.warns(None) as record:
_color_palette("Blues", 4)
assert len(record) == 0
test_da_list = [
DataArray(easy_array((10,))),
DataArray(easy_array((10, 3))),
DataArray(easy_array((10, 3, 2))),
]
@requires_matplotlib
class TestAxesKwargs:
@pytest.mark.parametrize("da", test_da_list)
@pytest.mark.parametrize("xincrease", [True, False])
def test_xincrease_kwarg(self, da, xincrease):
plt.clf()
da.plot(xincrease=xincrease)
assert plt.gca().xaxis_inverted() == (not xincrease)
@pytest.mark.parametrize("da", test_da_list)
@pytest.mark.parametrize("yincrease", [True, False])
def test_yincrease_kwarg(self, da, yincrease):
plt.clf()
da.plot(yincrease=yincrease)
assert plt.gca().yaxis_inverted() == (not yincrease)
@pytest.mark.parametrize("da", test_da_list)
@pytest.mark.parametrize("xscale", ["linear", "log", "logit", "symlog"])
def test_xscale_kwarg(self, da, xscale):
plt.clf()
da.plot(xscale=xscale)
assert plt.gca().get_xscale() == xscale
@pytest.mark.parametrize(
"da", [DataArray(easy_array((10,))), DataArray(easy_array((10, 3)))]
)
@pytest.mark.parametrize("yscale", ["linear", "log", "logit", "symlog"])
def test_yscale_kwarg(self, da, yscale):
plt.clf()
da.plot(yscale=yscale)
assert plt.gca().get_yscale() == yscale
@pytest.mark.parametrize("da", test_da_list)
def test_xlim_kwarg(self, da):
plt.clf()
expected = (0.0, 1000.0)
da.plot(xlim=[0, 1000])
assert plt.gca().get_xlim() == expected
@pytest.mark.parametrize("da", test_da_list)
def test_ylim_kwarg(self, da):
plt.clf()
da.plot(ylim=[0, 1000])
expected = (0.0, 1000.0)
assert plt.gca().get_ylim() == expected
@pytest.mark.parametrize("da", test_da_list)
def test_xticks_kwarg(self, da):
plt.clf()
da.plot(xticks=np.arange(5))
expected = np.arange(5).tolist()
assert np.all(plt.gca().get_xticks() == expected)
@pytest.mark.parametrize("da", test_da_list)
def test_yticks_kwarg(self, da):
plt.clf()
da.plot(yticks=np.arange(5))
expected = np.arange(5)
assert np.all(plt.gca().get_yticks() == expected)
|
apache-2.0
|
ChemiKyle/Novel-heatmap
|
book_map.py
|
1
|
4532
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import os
import tkinter
import tkinter.filedialog
import re
def make_bookmap(book_text):
book_map = []
longest_line = 0
    line_map = re.split(r'[.?!]', book_text)
for line in line_map[:-1]: # Skip the last entry since it's a space after the final period
this_line_map = line.replace('"', '').replace(',', '').replace(';', '').replace("\n", '').split(' ')
line_count_map = []
for word in this_line_map:
wordlength = len(word)
if wordlength > 0:
line_count_map.append(wordlength)
this_line_map_size = len(line_count_map)
if this_line_map_size > 0:
book_map.append(line_count_map)
if this_line_map_size > longest_line:
longest_line = this_line_map_size
return book_map, longest_line
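# Illustrative example (not part of the original script): for a short text the
# map is easy to verify by hand.
#
#     >>> make_bookmap("Hi there. Go now!")
#     ([[2, 5], [2, 3]], 2)
#
# Each inner list holds the word lengths of one sentence; the second return
# value is the length of the longest sentence, used later for padding.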
def pad_vector_with_zeroes(book_map, longest_line):
for line in book_map:
word_count = len(line)
if word_count < longest_line:
for i in range(longest_line - word_count):
line.append(0)
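# Continuing the example above (illustrative only): padding makes every
# sentence vector the same length so the map can become a rectangular array.
#
#     >>> bm, longest = make_bookmap("Hi there. Go now please!")
#     >>> pad_vector_with_zeroes(bm, longest)
#     >>> bm
#     [[2, 5, 0], [2, 3, 6]]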
def numpy_image_maker(book_map, book_name):
a = np.asarray(book_map)
a = np.transpose(a)
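    # Note (added): transposing puts sentences along the x-axis and word
    # position along the y-axis, so each column of the heatmap is one sentence
    # and brighter cells are longer words ('viridis' maps high values to
    # bright colors; the zero padding stays dark).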
plt.axis('off')
# Probably the most important value, this is the colormap
cmap = 'viridis'
# More info at: https://matplotlib.org/users/colormaps.html
    dpi = 100  # Default for direct writes is 80, but images of this size don't warrant compression
plt.imshow(a, cmap=cmap, interpolation='nearest')
directly_to_figure = True # Set to false to produce a preview first
if directly_to_figure:
plt.savefig(book_name + '_map_image.png', bbox_inches='tight', dpi=dpi)
else:
plt.show()
# Create a file with a matrix suitable for Octave/MATLAB
def matrix_maker(book_map, book_name):
a = np.asarray(book_map)
a = np.transpose(a)
np.savetxt(book_name + ".mat", a, newline=";\n")
def process_to_dictionary(chosen_file, chapter_dict):
book_name, ext = os.path.splitext(chosen_file)
with open(chosen_file, 'r') as f:
book_text = f.read()
book_map, longest_line = make_bookmap(book_text)
chapter_dict[book_name] = book_map
pad_vector_with_zeroes(book_map, longest_line)
def process_chapters_to_images(chapter_dict):
for i in chapter_dict:
file_name = i
book_map = chapter_dict[i]
numpy_image_maker(book_map, file_name)
def process_chapters_to_matrices(chapter_dict):
for i in chapter_dict:
file_name = i
book_map = chapter_dict[i]
matrix_maker(book_map, file_name)
def normalize_all_maps(chapter_dict):
normalizing_height = False # Toggle for whether to normalize across all heights,
# Turning on can lead to unsightly excessive whitespace
longest_sentence = 0
most_sentences = 0
for i in chapter_dict:
this_sentence_count = len(chapter_dict[i])
if this_sentence_count > most_sentences:
most_sentences = this_sentence_count
this_longest_sentence = len(chapter_dict[i][0])
if this_longest_sentence > longest_sentence:
longest_sentence = this_longest_sentence
# Make a vector full of zeroes the size of the longest sentence, used to pad width
if normalizing_height:
pad_vector = [0]*int(longest_sentence)
for i in chapter_dict:
pad_vector_with_zeroes(chapter_dict[i], longest_sentence)
if not normalizing_height:
pad_vector = [0]*int(len(chapter_dict[i][0]))
for j in range(most_sentences - len(chapter_dict[i])): # Add blank vectors to normalize width
            chapter_dict[i].append(list(pad_vector))  # append a copy so padded rows don't share one list
# TODO: Option to alternate between append and prepend to center heatmap
def main():
normalizing = False # Pad images to make them the same size; increases computation time quite a bit
making_image = True # Determines whether or not to plot the result
making_matrix = False # Output a .mat file to use with Octave/MATLAB
chapter_dict = {}
file_list = tkinter.filedialog.askopenfilenames() # Supports processing multiple files sequentially
for chosen_file in file_list:
process_to_dictionary(chosen_file, chapter_dict)
if normalizing:
normalize_all_maps(chapter_dict)
if making_image:
process_chapters_to_images(chapter_dict)
if making_matrix:
process_chapters_to_matrices(chapter_dict)
if __name__ == "__main__":
main()
|
gpl-3.0
|
kylerbrown/scikit-learn
|
sklearn/datasets/tests/test_mldata.py
|
384
|
5221
|
"""Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
|
bsd-3-clause
|
murrayrm/python-control
|
examples/cruise-control.py
|
2
|
17056
|
# cruise-control.py - Cruise control example from FBS
# RMM, 16 May 2019
#
# The cruise control system of a car is a common feedback system encountered
# in everyday life. The system attempts to maintain a constant velocity in the
# presence of disturbances primarily caused by changes in the slope of a
# road. The controller compensates for these unknowns by measuring the speed
# of the car and adjusting the throttle appropriately.
#
# This file explores the dynamics and control of the cruise control system,
# following the material presented in Feedback Systems by Astrom and Murray.
# A full nonlinear model of the vehicle dynamics is used, with both PI and
# state space control laws. Different methods of constructing control systems
# are shown, all using the InputOutputSystem class (and subclasses).
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import control as ct
#
# Section 4.1: Cruise control modeling and control
#
# Vehicle model: vehicle()
#
# To develop a mathematical model we start with a force balance for
# the car body. Let v be the speed of the car, m the total mass
# (including passengers), F the force generated by the contact of the
# wheels with the road, and Fd the disturbance force due to gravity,
# friction, and aerodynamic drag.
def vehicle_update(t, x, u, params={}):
"""Vehicle dynamics for cruise control system.
Parameters
----------
x : array
System state: car velocity in m/s
u : array
System input: [throttle, gear, road_slope], where throttle is
a float between 0 and 1, gear is an integer between 1 and 5,
and road_slope is in rad.
Returns
-------
float
Vehicle acceleration
"""
from math import copysign, sin
sign = lambda x: copysign(1, x) # define the sign() function
# Set up the system parameters
m = params.get('m', 1600.)
g = params.get('g', 9.8)
Cr = params.get('Cr', 0.01)
Cd = params.get('Cd', 0.32)
rho = params.get('rho', 1.3)
A = params.get('A', 2.4)
alpha = params.get(
'alpha', [40, 25, 16, 12, 10]) # gear ratio / wheel radius
# Define variables for vehicle state and inputs
v = x[0] # vehicle velocity
throttle = np.clip(u[0], 0, 1) # vehicle throttle
gear = u[1] # vehicle gear
theta = u[2] # road slope
# Force generated by the engine
omega = alpha[int(gear)-1] * v # engine angular speed
F = alpha[int(gear)-1] * motor_torque(omega, params) * throttle
# Disturbance forces
#
# The disturbance force Fd has three major components: Fg, the forces due
# to gravity; Fr, the forces due to rolling friction; and Fa, the
# aerodynamic drag.
# Letting the slope of the road be \theta (theta), gravity gives the
# force Fg = m g sin \theta.
Fg = m * g * sin(theta)
# A simple model of rolling friction is Fr = m g Cr sgn(v), where Cr is
# the coefficient of rolling friction and sgn(v) is the sign of v (+/- 1) or
# zero if v = 0.
Fr = m * g * Cr * sign(v)
# The aerodynamic drag is proportional to the square of the speed: Fa =
# 1/2 \rho Cd A |v| v, where \rho is the density of air, Cd is the
# shape-dependent aerodynamic drag coefficient, and A is the frontal area
# of the car.
Fa = 1/2 * rho * Cd * A * abs(v) * v
# Final acceleration on the car
Fd = Fg + Fr + Fa
dv = (F - Fd) / m
return dv
# Engine model: motor_torque
#
# The force F is generated by the engine, whose torque is proportional to
# the rate of fuel injection, which is itself proportional to a control
# signal 0 <= u <= 1 that controls the throttle position. The torque also
# depends on engine speed omega.
def motor_torque(omega, params={}):
# Set up the system parameters
Tm = params.get('Tm', 190.) # engine torque constant
omega_m = params.get('omega_m', 420.) # peak engine angular speed
beta = params.get('beta', 0.4) # peak engine rolloff
return np.clip(Tm * (1 - beta * (omega/omega_m - 1)**2), 0, None)
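# A couple of illustrative values, computed from the default parameters above
# (added for reference, not from the original text): at the peak engine speed
# omega = omega_m = 420 rad/s the torque is Tm = 190 Nm, while at omega = 0 it
# drops to 190 * (1 - 0.4) = 114 Nm.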
# Define the input/output system for the vehicle
vehicle = ct.NonlinearIOSystem(
vehicle_update, None, name='vehicle',
inputs = ('u', 'gear', 'theta'), outputs = ('v'), states=('v'))
# Figure 1.11: A feedback system for controlling the speed of a vehicle. In
# this example, the speed of the vehicle is measured and compared to the
# desired speed. The controller is a PI controller represented as a transfer
# function. In the textbook, the simulations are done for LTI systems, but
# here we simulate the full nonlinear system.
# Construct a PI controller with rolloff, as a transfer function
Kp = 0.5 # proportional gain
Ki = 0.1 # integral gain
control_tf = ct.tf2io(
ct.TransferFunction([Kp, Ki], [1, 0.01*Ki/Kp]),
name='control', inputs='u', outputs='y')
# Construct the closed loop control system
# Inputs: vref, gear, theta
# Outputs: v (vehicle velocity)
cruise_tf = ct.InterconnectedSystem(
(control_tf, vehicle), name='cruise',
connections = (
['control.u', '-vehicle.v'],
['vehicle.u', 'control.y']),
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'),
inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'),
outputs = ('v', 'u'))
# Define the time and input vectors
T = np.linspace(0, 25, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Response to change in road slope')
vel_axes = plt.subplot(2, 1, 1)
inp_axes = plt.subplot(2, 1, 2)
theta_hill = np.array([
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T])
for m in (1200, 1600, 2000):
# Compute the equilibrium state for the system
X0, U0 = ct.find_eqpt(
cruise_tf, [0, vref[0]], [vref[0], gear[0], theta0[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0], params={'m':m})
t, y = ct.input_output_response(
cruise_tf, T, [vref, gear, theta_hill], X0, params={'m':m})
# Plot the velocity
plt.sca(vel_axes)
plt.plot(t, y[0])
# Plot the input
plt.sca(inp_axes)
plt.plot(t, y[1])
# Add labels to the plots
plt.sca(vel_axes)
plt.ylabel('Speed [m/s]')
plt.legend(['m = 1200 kg', 'm = 1600 kg', 'm = 2000 kg'], frameon=False)
plt.sca(inp_axes)
plt.ylabel('Throttle')
plt.xlabel('Time [s]')
# Figure 4.2: Torque curves for a typical car engine. The graph on the
# left shows the torque generated by the engine as a function of the
# angular velocity of the engine, while the curve on the right shows
# torque as a function of car speed for different gears.
plt.figure()
plt.suptitle('Torque curves for typical car engine')
# Figure 4.2a - single torque curve as function of omega
omega_range = np.linspace(0, 700, 701)
plt.subplot(2, 2, 1)
plt.plot(omega_range, [motor_torque(w) for w in omega_range])
plt.xlabel(r'Angular velocity $\omega$ [rad/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.grid(True, linestyle='dotted')
# Figure 4.2b - torque curves in different gears, as function of velocity
plt.subplot(2, 2, 2)
v_range = np.linspace(0, 70, 71)
alpha = [40, 25, 16, 12, 10]
for gear in range(5):
omega_range = alpha[gear] * v_range
plt.plot(v_range, [motor_torque(w) for w in omega_range],
color='blue', linestyle='solid')
# Set up the axes and style
plt.axis([0, 70, 100, 200])
plt.grid(True, linestyle='dotted')
# Add labels
plt.text(11.5, 120, '$n$=1')
plt.text(24, 120, '$n$=2')
plt.text(42.5, 120, '$n$=3')
plt.text(58.5, 120, '$n$=4')
plt.text(58.5, 185, '$n$=5')
plt.xlabel('Velocity $v$ [m/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.show(block=False)
# Figure 4.3: Car with cruise control encountering a sloping road
# PI controller model: control_pi()
#
# We add to this model a feedback controller that attempts to regulate the
# speed of the car in the presence of disturbances. We shall use a
# proportional-integral controller
def pi_update(t, x, u, params={}):
# Get the controller parameters that we need
ki = params.get('ki', 0.1)
kaw = params.get('kaw', 2) # anti-windup gain
# Assign variables for inputs and states (for readability)
v = u[0] # current velocity
vref = u[1] # reference velocity
z = x[0] # integrated error
# Compute the nominal controller output (needed for anti-windup)
u_a = pi_output(t, x, u, params)
# Compute anti-windup compensation (scale by ki to account for structure)
u_aw = kaw/ki * (np.clip(u_a, 0, 1) - u_a) if ki != 0 else 0
# State is the integrated error, minus anti-windup compensation
return (vref - v) + u_aw
def pi_output(t, x, u, params={}):
# Get the controller parameters that we need
kp = params.get('kp', 0.5)
ki = params.get('ki', 0.1)
# Assign variables for inputs and states (for readability)
v = u[0] # current velocity
vref = u[1] # reference velocity
z = x[0] # integrated error
# PI controller
return kp * (vref - v) + ki * z
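# Summary of the control law implemented above (a sketch added for clarity):
# the output is u = kp*(vref - v) + ki*z, with integrator dynamics
# dz/dt = (vref - v) + (kaw/ki)*(sat(u) - u), where sat(.) clips to [0, 1].
# When the throttle is not saturated the anti-windup term vanishes and this
# reduces to a standard PI controller.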
control_pi = ct.NonlinearIOSystem(
pi_update, pi_output, name='control',
inputs = ['v', 'vref'], outputs = ['u'], states = ['z'],
params = {'kp':0.5, 'ki':0.1})
# Create the closed loop system
cruise_pi = ct.InterconnectedSystem(
(vehicle, control_pi), name='cruise',
connections=(
['vehicle.u', 'control.u'],
['control.v', 'vehicle.v']),
inplist=('control.vref', 'vehicle.gear', 'vehicle.theta'),
outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])
# Figure 4.3b shows the response of the closed loop system. The figure shows
# that even if the hill is so steep that the throttle changes from 0.17 to
# almost full throttle, the largest speed error is less than 1 m/s, and the
# desired velocity is recovered after 20 s.
# Define a function for creating a "standard" cruise control plot
def cruise_plot(sys, t, y, t_hill=5, vref=20, antiwindup=False,
linetype='b-', subplots=[None, None]):
# Figure out the plot bounds and indices
v_min = vref-1.2; v_max = vref+0.5; v_ind = sys.find_output('v')
u_min = 0; u_max = 2 if antiwindup else 1; u_ind = sys.find_output('u')
# Make sure the upper and lower bounds on v are OK
while max(y[v_ind]) > v_max: v_max += 1
while min(y[v_ind]) < v_min: v_min -= 1
# Create arrays for return values
subplot_axes = list(subplots)
# Velocity profile
if subplot_axes[0] is None:
subplot_axes[0] = plt.subplot(2, 1, 1)
else:
plt.sca(subplots[0])
plt.plot(t, y[v_ind], linetype)
plt.plot(t, vref*np.ones(t.shape), 'k-')
plt.plot([t_hill, t_hill], [v_min, v_max], 'k--')
plt.axis([0, t[-1], v_min, v_max])
plt.xlabel('Time $t$ [s]')
plt.ylabel('Velocity $v$ [m/s]')
# Commanded input profile
if subplot_axes[1] is None:
subplot_axes[1] = plt.subplot(2, 1, 2)
else:
plt.sca(subplots[1])
plt.plot(t, y[u_ind], 'r--' if antiwindup else linetype)
plt.plot([t_hill, t_hill], [u_min, u_max], 'k--')
plt.axis([0, t[-1], u_min, u_max])
plt.xlabel('Time $t$ [s]')
plt.ylabel('Throttle $u$')
# Applied input profile
if antiwindup:
# TODO: plot the actual signal from the process?
plt.plot(t, np.clip(y[u_ind], 0, 1), linetype)
plt.legend(['Commanded', 'Applied'], frameon=False)
return subplot_axes
# Define the time and input vectors
T = np.linspace(0, 30, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Compute the equilibrium throttle setting for the desired speed (solve for x
# and u given the gear, slope, and desired output velocity)
X0, U0, Y0 = ct.find_eqpt(
cruise_pi, [vref[0], 0], [vref[0], gear[0], theta0[0]],
y0=[0, vref[0]], iu=[1, 2], iy=[1], return_y=True)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Car with cruise control encountering sloping road')
theta_hill = [
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T]
t, y = ct.input_output_response(cruise_pi, T, [vref, gear, theta_hill], X0)
cruise_plot(cruise_pi, t, y)
#
# Example 7.8: State space feedback with integral action
#
# State space controller model: control_sf_ia()
#
# Construct a state space controller with integral action, linearized around
# an equilibrium point. The controller is constructed around the equilibrium
# point (x_d, u_d) and includes both feedforward and feedback compensation.
#
# Controller inputs: (x, y, r) system states, system output, reference
# Controller state: z integrated error (y - r)
# Controller output: u state feedback control
#
# Note: to make the structure of the controller more clear, we implement this
# as a "nonlinear" input/output module, even though the actual input/output
# system is linear. This also allows the use of parameters to set the
# operating point and gains for the controller.
def sf_update(t, z, u, params={}):
y, r = u[1], u[2]
return y - r
def sf_output(t, z, u, params={}):
# Get the controller parameters that we need
K = params.get('K', 0)
ki = params.get('ki', 0)
kf = params.get('kf', 0)
xd = params.get('xd', 0)
yd = params.get('yd', 0)
ud = params.get('ud', 0)
# Get the system state and reference input
x, y, r = u[0], u[1], u[2]
return ud - K * (x - xd) - ki * z + kf * (r - yd)
# Create the input/output system for the controller
control_sf = ct.NonlinearIOSystem(
sf_update, sf_output, name='control',
inputs=('x', 'y', 'r'),
outputs=('u'),
states=('z'))
# Create the closed loop system for the state space controller
cruise_sf = ct.InterconnectedSystem(
(vehicle, control_sf), name='cruise',
connections=(
['vehicle.u', 'control.u'],
['control.x', 'vehicle.v'],
['control.y', 'vehicle.v']),
inplist=('control.r', 'vehicle.gear', 'vehicle.theta'),
outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])
# Compute the linearization of the dynamics around the equilibrium point
# Y0 represents the steady state with PI control => we can use it to
# identify the steady state velocity and required throttle setting.
xd = Y0[1]
ud = Y0[0]
yd = Y0[1]
# Compute the linearized system at the eq pt
cruise_linearized = ct.linearize(vehicle, xd, [ud, gear[0], 0])
# Construct the gain matrices for the system
A, B, C = cruise_linearized.A, cruise_linearized.B[0, 0], cruise_linearized.C
K = 0.5
kf = -1 / (C * np.linalg.inv(A - B * K) * B)
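# Design note (added for clarity): with u = ud - K*(x - xd) + kf*(r - yd), the
# linearized closed loop settles at y_ss = -C*inv(A - B*K)*B*kf*r, so choosing
# kf = -1/(C*inv(A - B*K)*B) as above gives unit DC gain from the reference r
# to the output y.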
# Response of the system with no integral feedback term
plt.figure()
plt.suptitle('Cruise control with proportional and PI control')
theta_hill = [
0 if t <= 8 else
4./180. * pi * (t-8) if t <= 9 else
4./180. * pi for t in T]
t, y = ct.input_output_response(
cruise_sf, T, [vref, gear, theta_hill], [X0[0], 0],
    params={'K':K, 'kf':kf, 'ki':0.0, 'xd':xd, 'ud':ud, 'yd':yd})
subplots = cruise_plot(cruise_sf, t, y, t_hill=8, linetype='b--')
# Response of the system with state feedback + integral action
t, y = ct.input_output_response(
cruise_sf, T, [vref, gear, theta_hill], [X0[0], 0],
    params={'K':K, 'kf':kf, 'ki':0.1, 'xd':xd, 'ud':ud, 'yd':yd})
cruise_plot(cruise_sf, t, y, t_hill=8, linetype='b-', subplots=subplots)
# Add a legend
plt.legend(['Proportional', 'PI control'], frameon=False)
# Example 11.5: simulate the effect of a (steeper) hill at t = 5 seconds
#
# The windup effect occurs when a car encounters a hill that is so steep (6
# deg) that the throttle saturates when the cruise controller attempts to
# maintain speed.
plt.figure()
plt.suptitle('Cruise control with integrator windup')
T = np.linspace(0, 70, 101)
vref = 20 * np.ones(T.shape)
theta_hill = [
0 if t <= 5 else
6./180. * pi * (t-5) if t <= 6 else
6./180. * pi for t in T]
t, y = ct.input_output_response(
cruise_pi, T, [vref, gear, theta_hill], X0,
params={'kaw':0})
cruise_plot(cruise_pi, t, y, antiwindup=True)
# Example 11.6: add anti-windup compensation
#
# Anti-windup can be applied to the system to improve the response. Because of
# the feedback from the actuator model, the output of the integrator is
# quickly reset to a value such that the controller output is at the
# saturation limit.
plt.figure()
plt.suptitle('Cruise control with integrator anti-windup protection')
t, y = ct.input_output_response(
cruise_pi, T, [vref, gear, theta_hill], X0,
params={'kaw':2.})
cruise_plot(cruise_pi, t, y, antiwindup=True)
# If running as a standalone program, show plots and wait before closing
import os
if __name__ == '__main__' and 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
else:
plt.show(block=False)
|
bsd-3-clause
|
ocefpaf/iris
|
docs/iris/example_code/General/lineplot_with_legend.py
|
2
|
1136
|
"""
Multi-line temperature profile plot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
fname = iris.sample_data_path("air_temp.pp")
# Load exactly one cube from the given file.
temperature = iris.load_cube(fname)
# We only want a small number of latitudes, so filter some out
# using "extract".
temperature = temperature.extract(
iris.Constraint(latitude=lambda cell: 68 <= cell < 78)
)
for cube in temperature.slices("longitude"):
# Create a string label to identify this cube (i.e. latitude: value).
cube_label = "latitude: %s" % cube.coord("latitude").points[0]
# Plot the cube, and associate it with a label.
qplt.plot(cube, label=cube_label)
# Add the legend with 2 columns.
plt.legend(ncol=2)
# Put a grid on the plot.
plt.grid(True)
# Tell matplotlib not to extend the plot axes range to nicely
# rounded numbers.
plt.axis("tight")
# Finally, show it.
iplt.show()
if __name__ == "__main__":
main()
|
lgpl-3.0
|
StuartLittlefair/astroplan
|
astroplan/utils.py
|
1
|
11416
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
import urllib.error
import warnings
# Third-party
import numpy as np
from astropy.utils.data import download_file, clear_download_cache
from astropy.utils import iers
from astropy.time import Time
import astropy.units as u
from astropy.utils.data import _get_download_cache_locs, CacheMissingWarning
from astropy.coordinates import EarthLocation
# Package
from .exceptions import OldEarthOrientationDataWarning
__all__ = ["get_IERS_A_or_workaround", "download_IERS_A",
"time_grid_from_range", "_set_mpl_style_sheet",
"stride_array"]
IERS_A_WARNING = ("For best precision (on the order of arcseconds), you must "
"download an up-to-date IERS Bulletin A table. To do so, run:"
"\n\n"
">>> from astroplan import download_IERS_A\n"
">>> download_IERS_A()\n")
BACKUP_Time_get_delta_ut1_utc = Time._get_delta_ut1_utc
def _low_precision_utc_to_ut1(self, jd1, jd2):
"""
When no IERS Bulletin A is available (no internet connection), use low
precision time conversion by assuming UT1-UTC=0 always.
This method mimics `~astropy.coordinates.builtin_frames.utils.get_dut1utc`
"""
try:
if self.mjd*u.day not in iers.IERS_Auto.open()['MJD']:
warnings.warn(IERS_A_WARNING, OldEarthOrientationDataWarning)
return self.delta_ut1_utc
except (AttributeError, ValueError):
warnings.warn(IERS_A_WARNING, OldEarthOrientationDataWarning)
return np.zeros(self.shape)
def get_IERS_A_or_workaround():
"""
Get the cached IERS Bulletin A table if one exists. If one does not exist,
monkey patch `~astropy.time.Time._get_delta_ut1_utc` so that
    `~astropy.time.Time` objects don't raise errors when UT1-UTC is computed
    off the end of the IERS table.
"""
if IERS_A_in_cache():
iers.IERS.iers_table = _get_IERS_A_table()
else:
Time._get_delta_ut1_utc = _low_precision_utc_to_ut1
def IERS_A_in_cache():
"""
Check if the IERS Bulletin A table is locally cached.
"""
urls = (iers.conf.iers_auto_url, iers.conf.iers_auto_url_mirror)
for url_key in urls:
# The below code which accesses ``urlmapfn`` is stolen from
# astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except (IOError, OSError) as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warnings.warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
else:
with _open_shelve(urlmapfn, True) as url2hash:
# TODO: try to figure out how to test this in the unicode case
if str(url_key) in url2hash:
return True
return False
def _get_IERS_A_table(warn_update=14*u.day):
"""
Grab the locally cached copy of the IERS Bulletin A table. Check to see
if it's up to date, and warn the user if it is not.
This will fail and raise OSError if the file is not in the cache.
"""
if IERS_A_in_cache():
table = iers.IERS_Auto.open()
# Use polar motion flag to identify last observation before predictions
if 'PolPMFlag_A' in table.colnames:
index_of_last_observation = ''.join(table['PolPMFlag_A']).index('IP')
time_of_last_observation = Time(table['MJD'][index_of_last_observation],
format='mjd')
# If time of last observation is not available, set it equal to the
# final prediction in the table:
else:
time_of_last_observation = Time(table['MJD'].max(),
format='mjd')
time_since_last_update = Time.now() - time_of_last_observation
# If the IERS bulletin is more than `warn_update` days old, warn user
if warn_update < time_since_last_update:
warnmsg = ("Your version of the IERS Bulletin A is {:.1f} days "
"old. ".format(time_since_last_update.to(u.day).value) +
IERS_A_WARNING)
warnings.warn(warnmsg, OldEarthOrientationDataWarning)
return table
else:
raise OSError("No IERS A table has been downloaded.")
def download_IERS_A(show_progress=True):
"""
Download and cache the IERS Bulletin A table.
If one is already cached, download a new one and overwrite the old. Store
table in the astropy cache, and undo the monkey patching done by
`~astroplan.get_IERS_A_or_workaround`.
Parameters
----------
show_progress : bool
`True` shows a progress bar during the download.
"""
urls = (iers.conf.iers_auto_url, iers.conf.iers_auto_url_mirror)
if IERS_A_in_cache():
for url in urls:
clear_download_cache(url)
for i, url in enumerate(urls):
try:
local_iers_a_path = download_file(url, cache=True,
show_progress=show_progress)
except urllib.error.URLError:
if i == len(urls) - 1:
raise
# Undo monkey patch set up by get_IERS_A_or_workaround
iers.IERS.iers_table = iers.IERS_A.open(local_iers_a_path)
Time._get_delta_ut1_utc = BACKUP_Time_get_delta_ut1_utc
@u.quantity_input(time_resolution=u.hour)
def time_grid_from_range(time_range, time_resolution=0.5*u.hour):
"""
Get linearly-spaced sequence of times.
Parameters
----------
time_range : `~astropy.time.Time` (length = 2)
Lower and upper bounds on time sequence.
time_resolution : `~astropy.units.quantity` (optional)
Time-grid spacing
Returns
-------
times : `~astropy.time.Time`
Linearly-spaced sequence of times
"""
try:
start_time, end_time = time_range
except ValueError:
raise ValueError("time_range should have a length of 2: lower and "
"upper bounds on the time sequence.")
return Time(np.arange(start_time.jd, end_time.jd,
time_resolution.to(u.day).value), format='jd')
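# Illustrative usage sketch (not part of the original module; the range below
# is hypothetical):
#
#     >>> from astropy.time import Time
#     >>> import astropy.units as u
#     >>> rng = Time(['2020-01-01 00:00', '2020-01-01 06:00'])
#     >>> grid = time_grid_from_range(rng, time_resolution=1 * u.hour)
#     # grid is a Time array spaced one hour apart; the end point is excluded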
def _mock_remote_data():
"""
    Apply mocks (i.e. monkey-patches) so that functionality which normally
    requires internet access can be exercised offline.
This is currently called in `astroplan/conftest.py` when the tests are run
and the `--remote-data` option isn't used.
The way this setup works is that for functionality that usually requires
internet access, but has mocks in place, it is possible to write the test
without adding a `@remote_data` decorator, and `py.test` will do the right
thing when running the tests:
1. Access the internet and use the normal code if `--remote-data` is used
2. Not access the internet and use the mock code if `--remote-data` is not used
Both of these cases are tested on travis-ci.
"""
from .target import FixedTarget
from astropy.coordinates import EarthLocation
if not hasattr(FixedTarget, '_real_from_name'):
FixedTarget._real_from_name = FixedTarget.from_name
FixedTarget.from_name = FixedTarget._from_name_mock
if not hasattr(EarthLocation, '_real_of_site'):
EarthLocation._real_of_site = EarthLocation.of_site
EarthLocation.of_site = EarthLocation_mock.of_site_mock
# otherwise already mocked
def _unmock_remote_data():
"""
    Undo the changes made by `_mock_remote_data`.
    Currently unused.
"""
from .target import FixedTarget
if hasattr(FixedTarget, '_real_from_name'):
FixedTarget.from_name = FixedTarget._real_from_name
del FixedTarget._real_from_name
if hasattr(EarthLocation, '_real_of_site'):
EarthLocation.of_site = EarthLocation._real_of_site
del EarthLocation._real_of_site
# otherwise assume it's already correct
def _set_mpl_style_sheet(style_sheet):
"""
Import matplotlib, set the style sheet to ``style_sheet`` using
the most backward compatible import pattern.
"""
import matplotlib
matplotlib.rcdefaults()
matplotlib.rcParams.update(style_sheet)
def stride_array(arr, window_width):
"""
Computes all possible sequential subarrays of arr with length = window_width
Parameters
----------
arr : array-like (length = n)
Linearly-spaced sequence
window_width : int
Number of elements in each new sub-array
Returns
-------
    strided_arr : array (shape = (n - window_width + 1, window_width))
        All sequential subarrays of ``arr`` of length ``window_width``
"""
as_strided = np.lib.stride_tricks.as_strided
new_shape = (len(arr) - window_width + 1, window_width)
strided_arr = as_strided(arr, new_shape, (arr.strides[0], arr.strides[0]))
return strided_arr
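# Illustrative usage sketch (not part of the original module): overlapping
# windows of length 3 over a small array.
#
#     >>> import numpy as np
#     >>> stride_array(np.array([1, 2, 3, 4, 5]), 3)
#     array([[1, 2, 3],
#            [2, 3, 4],
#            [3, 4, 5]])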
class EarthLocation_mock(EarthLocation):
"""
    Mock of the EarthLocation class that provides, without any remote data,
    the locations commonly used in the tests.
"""
@classmethod
def of_site_mock(cls, string):
subaru = EarthLocation.from_geodetic(-155.4761111111111*u.deg,
19.825555555555564*u.deg,
4139*u.m)
lco = EarthLocation.from_geodetic(-70.70166666666665*u.deg,
-29.003333333333327*u.deg,
2282*u.m)
aao = EarthLocation.from_geodetic(149.06608611111113*u.deg,
-31.277038888888896*u.deg,
1164*u.m)
vbo = EarthLocation.from_geodetic(78.8266*u.deg,
12.576659999999999*u.deg,
725*u.m)
apo = EarthLocation.from_geodetic(-105.82*u.deg,
32.78*u.deg,
2798*u.m)
keck = EarthLocation.from_geodetic(-155.47833333333332*u.deg,
19.828333333333326*u.deg,
4160*u.m)
kpno = EarthLocation.from_geodetic(-111.6*u.deg,
31.963333333333342*u.deg,
2120*u.m)
lapalma = EarthLocation.from_geodetic(-17.879999*u.deg,
28.758333*u.deg,
2327*u.m)
observatories = dict(lco=lco, subaru=subaru, aao=aao, vbo=vbo, apo=apo,
keck=keck, kpno=kpno, lapalma=lapalma)
return observatories[string.lower()]
def _open_shelve(shelffn, withclosing=False):
"""
Opens a shelf file. If ``withclosing`` is True, it will be opened with
closing, allowing use like:
with _open_shelve('somefile',True) as s:
...
This workaround can be removed in favour of using shelve.open() directly
once support for Python <3.4 is dropped.
"""
import shelve
import contextlib
shelf = shelve.open(shelffn, protocol=2)
if withclosing:
return contextlib.closing(shelf)
else:
return shelf
|
bsd-3-clause
|
sketchytechky/zipline
|
zipline/assets/assets.py
|
4
|
23193
|
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from functools import partial
from numbers import Integral
from operator import getitem, itemgetter
import warnings
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types
import sqlalchemy as sa
from toolz import compose
from zipline.errors import (
MultipleSymbolsFound,
RootSymbolNotFound,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets import (
Asset, Equity, Future,
)
from zipline.assets.asset_writer import (
FUTURE_TABLE_FIELDS,
EQUITY_TABLE_FIELDS,
split_delimited_symbol,
)
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
    'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
def _convert_asset_str_fields(dict):
"""
Takes in a dict of Asset init args and converts from unicode to string
where applicable
"""
for key, value in dict.items():
if key in _asset_str_fields:
dict[key] = str(value)
def _convert_asset_timestamp_fields(dict):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key, value in dict.items():
if (key in _asset_timestamp_fields) and (value is not None):
dict[key] = pd.Timestamp(value, tz='UTC')
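# Illustrative sketch (not part of the original source): both helpers mutate
# the passed-in dict, e.g.
#
#     >>> d = {'symbol': u'AAPL', 'start_date': '2014-01-02'}
#     >>> _convert_asset_str_fields(d)        # 'symbol' becomes a plain str
#     >>> _convert_asset_timestamp_fields(d)  # 'start_date' becomes a UTC Timestamp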
class AssetFinder(object):
# Token used as a substitute for pickling objects that contain a
# reference to an AssetFinder
PERSISTENT_TOKEN = "<AssetFinder>"
def __init__(self, engine, allow_sid_assignment=True):
self.allow_sid_assignment = allow_sid_assignment
self.engine = engine
metadata = sa.MetaData(bind=engine)
self.equities = equities = sa.Table(
'equities',
metadata,
autoload=True,
autoload_with=engine,
)
self.futures_exchanges = sa.Table(
'futures_exchanges',
metadata,
autoload=True,
autoload_with=engine,
)
self.futures_root_symbols = sa.Table(
'futures_root_symbols',
metadata,
autoload=True,
autoload_with=engine,
)
self.futures_contracts = futures_contracts = sa.Table(
'futures_contracts',
metadata,
autoload=True,
autoload_with=engine,
)
self.asset_router = sa.Table(
'asset_router',
metadata,
autoload=True,
autoload_with=engine,
)
# Create the equity and future queries once.
_equity_sid = equities.c.sid
_equity_by_sid = sa.select(
tuple(map(partial(getitem, equities.c), EQUITY_TABLE_FIELDS)),
)
def select_equity_by_sid(sid):
return _equity_by_sid.where(_equity_sid == int(sid))
self.select_equity_by_sid = select_equity_by_sid
_future_sid = futures_contracts.c.sid
_future_by_sid = sa.select(
tuple(map(
partial(getitem, futures_contracts.c),
FUTURE_TABLE_FIELDS,
)),
)
def select_future_by_sid(sid):
return _future_by_sid.where(_future_sid == int(sid))
self.select_future_by_sid = select_future_by_sid
        # Cache for lookup of assets by sid; the objects in the asset lookup
        # cache may be shared with the results from the equity and future
        # lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def asset_type_by_sid(self, sid):
"""
Retrieve the asset type of a given sid.
"""
try:
return self._asset_type_cache[sid]
except KeyError:
pass
asset_type = sa.select((self.asset_router.c.asset_type,)).where(
self.asset_router.c.sid == int(sid),
).scalar()
if asset_type is not None:
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset object of a given sid.
"""
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
# Cache the asset if it has been retrieved
if asset is not None:
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
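    # Illustrative usage sketch (not part of the original source; the sids and
    # engine are hypothetical), assuming a populated asset database:
    #
    #     >>> finder = AssetFinder(engine)
    #     >>> finder.retrieve_asset(24)                     # Equity or Future
    #     >>> finder.retrieve_asset(-1, default_none=True)  # None if unknown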
def retrieve_all(self, sids, default_none=False):
        return [self.retrieve_asset(sid, default_none) for sid in sids]
def _retrieve_equity(self, sid):
"""
Retrieve the Equity object of a given sid.
"""
try:
return self._equity_cache[sid]
except KeyError:
pass
data = self.select_equity_by_sid(sid).execute().fetchone()
# Convert 'data' from a RowProxy object to a dict, to allow assignment
data = dict(data.items())
if data:
_convert_asset_str_fields(data)
_convert_asset_timestamp_fields(data)
equity = Equity(**data)
else:
equity = None
self._equity_cache[sid] = equity
return equity
def _retrieve_futures_contract(self, sid):
"""
Retrieve the Future object of a given sid.
"""
try:
return self._future_cache[sid]
except KeyError:
pass
data = self.select_future_by_sid(sid).execute().fetchone()
# Convert 'data' from a RowProxy object to a dict, to allow assignment
data = dict(data.items())
if data:
_convert_asset_str_fields(data)
_convert_asset_timestamp_fields(data)
future = Future(**data)
else:
future = None
self._future_cache[sid] = future
return future
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
        Return the Equity matching the given symbol in the database.
        If multiple Equities are found and as_of_date is not set,
        raises MultipleSymbolsFound.
        If no Equity was active at as_of_date, raises SymbolNotFound.
"""
# Format inputs
symbol = symbol.upper()
if as_of_date is not None:
as_of_date = pd.Timestamp(normalize_date(as_of_date))
company_symbol, share_class_symbol, fuzzy_symbol = \
split_delimited_symbol(symbol)
equities_cols = self.equities.c
if as_of_date:
ad_value = as_of_date.value
if fuzzy:
# Search for a single exact match on the fuzzy column
fuzzy_candidates = sa.select((equities_cols.sid,)).where(
(equities_cols.fuzzy_symbol == fuzzy_symbol) &
(equities_cols.start_date <= ad_value) &
(equities_cols.end_date >= ad_value),
).execute().fetchall()
# If exactly one SID exists for fuzzy_symbol, return that sid
if len(fuzzy_candidates) == 1:
return self._retrieve_equity(fuzzy_candidates[0]['sid'])
# Search for exact matches of the split-up company_symbol and
# share_class_symbol
candidates = sa.select((equities_cols.sid,)).where(
(equities_cols.company_symbol == company_symbol) &
(equities_cols.share_class_symbol == share_class_symbol) &
(equities_cols.start_date <= ad_value) &
(equities_cols.end_date >= ad_value),
).execute().fetchall()
# If exactly one SID exists for symbol, return that symbol
if len(candidates) == 1:
return self._retrieve_equity(candidates[0]['sid'])
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
elif not candidates:
sid = sa.select((equities_cols.sid,)).where(
(equities_cols.company_symbol == company_symbol) &
(equities_cols.share_class_symbol == share_class_symbol) &
(equities_cols.start_date <= ad_value),
).order_by(
equities_cols.end_date.desc(),
).scalar()
if sid is not None:
return self._retrieve_equity(sid)
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
elif len(candidates) > 1:
sid = sa.select((equities_cols.sid,)).where(
(equities_cols.company_symbol == company_symbol) &
(equities_cols.share_class_symbol == share_class_symbol) &
(equities_cols.start_date <= ad_value),
).order_by(
equities_cols.start_date.desc(),
equities_cols.end_date.desc(),
).scalar()
if sid is not None:
return self._retrieve_equity(sid)
raise SymbolNotFound(symbol=symbol)
else:
# If this is a fuzzy look-up, check if there is exactly one match
# for the fuzzy symbol
if fuzzy:
fuzzy_sids = sa.select((equities_cols.sid,)).where(
(equities_cols.fuzzy_symbol == fuzzy_symbol)
).execute().fetchall()
if len(fuzzy_sids) == 1:
return self._retrieve_equity(fuzzy_sids[0]['sid'])
sids = sa.select((equities_cols.sid,)).where(
(equities_cols.company_symbol == company_symbol) &
(equities_cols.share_class_symbol == share_class_symbol)
).execute().fetchall()
if len(sids) == 1:
return self._retrieve_equity(sids[0]['sid'])
elif not sids:
raise SymbolNotFound(symbol=symbol)
else:
raise MultipleSymbolsFound(
symbol=symbol,
options=list(map(
compose(self._retrieve_equity, itemgetter('sid')),
sids,
))
)
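    # Illustrative sketch (hypothetical symbol and date, not from the source):
    #
    #     >>> finder.lookup_symbol('AAPL', pd.Timestamp('2015-01-02'))
    #
    # With as_of_date=None, a symbol that maps to several sids raises
    # MultipleSymbolsFound instead of silently picking one.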
def lookup_future_chain(self, root_symbol, as_of_date, knowledge_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp or pd.NaT
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this
date is the primary contract, etc. If NaT is given, the
chain is unbounded, and all contracts for this root symbol
are returned.
knowledge_date : pd.Timestamp or pd.NaT
Date for determining which contracts exist for inclusion in
this chain. Contracts exist only if they have a start_date
            on or before this date. If NaT is given and as_of_date is
            not NaT, the value of as_of_date is used for
knowledge_date.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
fc_cols = self.futures_contracts.c
if as_of_date is pd.NaT:
# If the as_of_date is NaT, get all contracts for this
# root symbol.
sids = list(map(
itemgetter('sid'),
sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol),
).order_by(
fc_cols.notice_date.asc(),
).execute().fetchall()))
else:
as_of_date = as_of_date.value
if knowledge_date is pd.NaT:
# If knowledge_date is NaT, default to using as_of_date
knowledge_date = as_of_date
else:
knowledge_date = knowledge_date.value
sids = list(map(
itemgetter('sid'),
sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(fc_cols.notice_date > as_of_date) &
(fc_cols.start_date <= knowledge_date),
).order_by(
fc_cols.notice_date.asc(),
).execute().fetchall()
))
if not sids:
# Check if root symbol exists.
count = sa.select((sa.func.count(fc_cols.sid),)).where(
fc_cols.root_symbol == root_symbol,
).scalar()
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
return list(map(self._retrieve_futures_contract, sids))
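    # Illustrative sketch (hypothetical root symbol, not from the source):
    # passing NaT for both dates returns every contract for the root symbol,
    # ordered by notice date.
    #
    #     >>> finder.lookup_future_chain('CL', pd.NaT, pd.NaT)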
@property
def sids(self):
return tuple(map(
itemgetter('sid'),
sa.select((self.asset_router.c.sid,)).execute().fetchall(),
))
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
try:
matches.append(
self.lookup_symbol(asset_convertible, as_of_date)
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
Convert a AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
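    # Illustrative sketch (hypothetical inputs, not from the source): mixed
    # sids, symbols and Assets resolve to a (matches, missing) pair.
    #
    #     >>> matches, missing = finder.lookup_generic(
    #     ...     [24, 'AAPL', 'NOT-A-SYMBOL'], pd.Timestamp('2015-01-02'))
    #     >>> missing
    #     ['NOT-A-SYMBOL']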
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: " + missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _compute_asset_lifetimes(self):
"""
        Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).execute(),
), dtype='<f8', # use doubles so we get NaNs
)
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', '<f8'),
('start', '<f8'),
('end', '<f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', '<i8'),
('start', '<i8'),
('end', '<i8'),
])
def lifetimes(self, dates, include_start_date):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.modelling.engine.SimpleFFCEngine._compute_root_mask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = dates.asi8[:, None]
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
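    # Illustrative sketch of `lifetimes` (hypothetical dates, not from the
    # source): the returned frame is indexed by the requested dates with one
    # boolean column per sid.
    #
    #     >>> dates = pd.date_range('2015-01-05', '2015-01-09')
    #     >>> alive = finder.lifetimes(dates, include_start_date=True)
    #     >>> alive.loc[dates[0]]    # boolean Series, one entry per asset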
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
|
apache-2.0
|
grhawk/ASE
|
tools/doc/ase/phonons.py
|
2
|
2330
|
# creates: Al_phonon.png Al_mode.gif Al_mode.pdf
from ase.lattice import bulk
from ase.calculators.emt import EMT
from ase.dft.kpoints import ibz_points, get_bandpath
from ase.phonons import Phonons
# Setup crystal and EMT calculator
atoms = bulk('Al', a=4.05)
calc = EMT()
# Phonon calculator
N = 6
ph = Phonons(atoms, calc, supercell=(N, N, N))
ph.run()
# Read forces and assemble the dynamical matrix
ph.read(acoustic=True)
# High-symmetry points in the Brillouin zone
points = ibz_points['fcc']
G = points['Gamma']
X = points['X']
W = points['W']
K = points['K']
L = points['L']
U = points['U']
point_names = [r'$\Gamma$', 'X', 'U', 'L', r'$\Gamma$', 'K']
path = [G, X, U, L, G, K]
path_kc, q, Q = get_bandpath(path, atoms.cell, 100)
omega_kn = 1000 * ph.band_structure(path_kc)
# DOS
omega_e, dos_e = ph.dos(kpts=(50, 50, 50), npts=5000, delta=1e-4)
omega_e *= 1000
# Plot phonon dispersion
import matplotlib
#matplotlib.use('Agg')
import pylab as plt
plt.figure(1, (8, 6))
plt.axes([.1, .07, .67, .85])
for n in range(len(omega_kn[0])):
omega_n = omega_kn[:, n]
plt.plot(q, omega_n, 'k-', lw=2)
plt.xticks(Q, point_names, fontsize=18)
plt.yticks(fontsize=18)
plt.xlim(q[0], q[-1])
plt.ylim(0, 35)
plt.ylabel("Frequency ($\mathrm{meV}$)", fontsize=22)
plt.grid('on')
plt.axes([.8, .07, .17, .85])
plt.fill_between(dos_e, omega_e, y2=0, color='lightgrey', edgecolor='k', lw=2)
plt.ylim(0, 35)
plt.xticks([], [])
plt.yticks([], [])
plt.xlabel("DOS", fontsize=18)
plt.savefig('Al_phonon.png')
# Write modes for specific q-vector to trajectory files
ph.write_modes([l/2 for l in L], branches=[2], repeat=(8, 8, 8), kT=3e-4,
center=True)
# Write png snapshots of the mode and assemble them into an animation
from subprocess import call
from ase.io import PickleTrajectory, write
trajfile = 'phonon.mode.2.traj'
trajectory = PickleTrajectory(trajfile, 'r')
for i, atoms in enumerate(trajectory):
write('picture%02i.png' %i, atoms, show_unit_cell=2,
rotation='-36x,26.5y,-25z')
# Flatten images for better quality
call(['convert', '-flatten', 'picture%02i.png' %i, 'picture%02i.png' %i])
# Make static pdf image for pdflatex
call(['convert', 'picture00.png', 'Al_mode.pdf'])
# Concatenate to gif animation
call(['convert', '-delay', '5', '-loop', '0', '-dispose', 'Previous', 'picture*.png',
'Al_mode.gif'])
|
gpl-2.0
|
ssaeger/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
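# Illustrative note (not part of the original example): benchmark_influence
# returns three parallel lists, one entry per value in
# conf['changing_param_values'], e.g.
#
#     powers, times, complexities = benchmark_influence(configurations[0])
#
# where powers[i], times[i] and complexities[i] all describe the model fitted
# with changing_param_values[i].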
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/tools/test_to_datetime.py
|
2
|
93371
|
""" test to_datetime """
import calendar
from collections import deque
from datetime import datetime, timedelta
import locale
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import iNaT, parsing
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Series,
Timestamp,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
class TestTimeConversionFormats:
@pytest.mark.parametrize("readonly", [True, False])
def test_to_datetime_readonly(self, readonly):
# GH#34857
arr = np.array([], dtype=object)
if readonly:
arr.setflags(write=False)
result = to_datetime(arr)
expected = to_datetime([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
results1 = [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")]
results2 = [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")]
for vals, expecteds in [
(values, (Index(results1), Index(results2))),
(Series(values), (Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])),
]:
for i, fmt in enumerate(["%d/%m/%Y", "%m/%d/%Y"]):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
tm.assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
expected = Series(
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = "nat"
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache)
expected = Series(
[datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object,
)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache)
expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s",
[
# Null values with Strings
["19801222", "20010112", None],
["19801222", "20010112", np.nan],
["19801222", "20010112", pd.NaT],
["19801222", "20010112", "NaT"],
# Null values with Integers
[19801222, 20010112, None],
[19801222, 20010112, np.nan],
[19801222, 20010112, pd.NaT],
[19801222, 20010112, "NaT"],
],
)
def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
# GH 30011
# format='%Y%m%d'
# with None
expected = Series([Timestamp("19801222"), Timestamp("20010112"), pd.NaT])
result = Series(pd.to_datetime(input_s, format="%Y%m%d"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s, expected",
[
# NaN before strings with invalid date values
[
Series(["19801222", np.nan, "20010012", "10019999"]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN after strings with invalid date values
[
Series(["19801222", "20010012", "10019999", np.nan]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN before integers with invalid date values
[
Series([20190813, np.nan, 20010012, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
# NaN after integers with invalid date values
[
Series([20190813, 20010012, np.nan, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
result = pd.to_datetime(input_s, format="%Y%m%d", errors="coerce")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"int_date, expected",
[
# valid date, length == 8
[20121030, datetime(2012, 10, 30)],
# short valid date, length == 6
[199934, datetime(1999, 3, 4)],
# long integer date partially parsed to datetime(2012,1,1), length > 8
[2012010101, 2012010101],
# invalid date partially parsed to datetime(2012,9,9), length == 8
[20129930, 20129930],
# short integer date partially parsed to datetime(2012,9,9), length < 8
[2012993, 2012993],
# short invalid date, length == 4
[2121, 2121],
],
)
def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):
# GH 26583
result = to_datetime(int_date, format="%Y%m%d", errors="ignore")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_time(self, cache):
data = [
["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")],
["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")],
[
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
# truncating the nanoseconds when a format was provided
for v in [
"2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000",
]:
expected = pd.to_datetime(v, cache=cache)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
["2009324", "%Y%W%w", Timestamp("2009-08-13")],
["2013020", "%Y%U%w", Timestamp("2013-01-13")],
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
[
[
"%Y-%m-%d %H:%M:%S %Z",
["2010-01-01 12:00:00 UTC"] * 2,
[Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2,
],
[
"%Y-%m-%d %H:%M:%S %Z",
[
"2010-01-01 12:00:00 UTC",
"2010-01-01 12:00:00 GMT",
"2010-01-01 12:00:00 US/Pacific",
],
[
Timestamp("2010-01-01 12:00:00", tz="UTC"),
Timestamp("2010-01-01 12:00:00", tz="GMT"),
Timestamp("2010-01-01 12:00:00", tz="US/Pacific"),
],
],
[
"%Y-%m-%d %H:%M:%S%z",
["2010-01-01 12:00:00+0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100"] * 2,
[Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"],
[
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)),
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)),
],
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"],
[
Timestamp(
"2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)
), # pytz coerces to UTC
Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)),
],
],
],
)
def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt)
expected = Index(expected_dates)
tm.assert_equal(result, expected)
def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):
# GH 32792
dates = [
"2010-01-01 12:00:00 +0100",
"2010-01-01 12:00:00 -0100",
"2010-01-01 12:00:00 +0300",
"2010-01-01 12:00:00 +0400",
]
expected_dates = [
"2010-01-01 11:00:00+00:00",
"2010-01-01 13:00:00+00:00",
"2010-01-01 09:00:00+00:00",
"2010-01-01 08:00:00+00:00",
]
fmt = "%Y-%m-%d %H:%M:%S %z"
result = pd.to_datetime(dates, format=fmt, utc=True)
expected = DatetimeIndex(expected_dates)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""]
)
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = "%Y-%m-%d %H:%M:%S %z"
date = "2010-01-01 12:00:00 " + offset
msg = "does not match format|unconverted data remains"
with pytest.raises(ValueError, match=msg):
pd.to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = "%Y-%m-%d %H:%M:%S %z"
arg = Index(["2010-01-01 12:00:00 Z"], name="foo")
result = pd.to_datetime(arg, format=fmt)
expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo")
tm.assert_index_equal(result, expected)
class TestToDatetime:
@pytest.mark.parametrize(
"s, _format, dt",
[
["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)],
["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)],
["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)],
],
)
def test_to_datetime_iso_week_year_format(self, s, _format, dt):
# See GH#16607
assert to_datetime(s, format=_format) == dt
@pytest.mark.parametrize(
"msg, s, _format",
[
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 50",
"%Y %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 51",
"%G %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Monday",
"%G %A",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Mon",
"%G %a",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %w",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %u",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"2051",
"%G",
],
[
"Day of the year directive '%j' is not compatible with ISO year "
"directive '%G'. Use '%Y' instead.",
"1999 51 6 256",
"%G %V %u %j",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sunday",
"%Y %V %A",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sun",
"%Y %V %a",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %w",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %u",
],
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"20",
"%V",
],
],
)
def test_error_iso_week_year(self, msg, s, _format):
# See GH#16607
# This test checks for errors thrown when giving the wrong format
# However, as discussed on PR#25541, overriding the locale
# causes a different error to be thrown due to the format being
# locale specific, but the test data is in english.
# Therefore, the tests only run when locale is not overwritten,
# as a sort of solution to this problem.
if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != (
"it_IT",
"UTF-8",
):
with pytest.raises(ValueError, match=msg):
to_datetime(s, format=_format)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = pd.to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = pd.to_datetime("20080115")
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparseable
s = "Month 1, 1999"
assert pd.to_datetime(s, errors="ignore") == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone("US/Eastern"):
npnow = np.datetime64("now").astype("datetime64[ns]")
pdnow = pd.to_datetime("now")
pdnow2 = pd.to_datetime(["now"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
# one of these will _almost_ always be in a different day from UTC.
        # Unfortunately, if this test runs between 12 and 1 AM Samoa time,
        # then both of these timezones _and_ UTC will all be in the same day,
# so this test will not detect the regression introduced in #18666.
with tm.set_timezone("Pacific/Auckland"): # 12-13 hours ahead of UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
tstoday = Timestamp("today")
tstoday2 = Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone("US/Samoa"): # 11 hours behind UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime(["now"])
to_datetime(["today"])
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
for dt in in_bound_dts:
assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize(
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert pd.to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize("unit", ["s", "D"])
def test_to_datetime_array_of_dt64s(self, cache, unit):
# https://github.com/pandas-dev/pandas/issues/31491
# Need at least 50 to ensure cache is used.
dts = [
np.datetime64("2000-01-01", unit),
np.datetime64("2000-01-02", unit),
] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
pd.to_datetime(dts, cache=cache),
DatetimeIndex([Timestamp(x).asm8 for x in dts]),
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64("9999-01-01")]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dts_with_oob, errors="raise")
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="coerce", cache=cache),
DatetimeIndex(
[Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ [pd.NaT],
),
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="ignore", cache=cache),
Index([dt.item() for dt in dts_with_oob]),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
result = pd.to_datetime(arr, cache=cache)
expected = DatetimeIndex(
["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific"
)
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [
Timestamp("2013-01-01 13:00:00", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00", tz="US/Eastern"),
]
msg = (
"Tz-aware datetime.datetime cannot be "
"converted to datetime64 unless utc=True"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_different_offsets(self, cache):
# inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
# see GH-26097 for more
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
arr = [ts_string_1] * 5 + [ts_string_2] * 5
expected = Index([parse(x) for x in arr])
result = pd.to_datetime(arr, cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone("US/Eastern")
arr = np.array(
[
us_eastern.localize(
datetime(year=2000, month=1, day=1, hour=3, minute=0)
),
us_eastern.localize(
datetime(year=2000, month=6, day=1, hour=3, minute=0)
),
],
dtype=object,
)
result = pd.to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"init_constructor, end_constructor, test_method",
[
(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_to_datetime_utc_true(
self, cache, init_constructor, end_constructor, test_method
):
# See gh-11934 & gh-6415
data = ["20100102 121314", "20100102 121315"]
expected_data = [
Timestamp("2010-01-02 12:13:14", tz="utc"),
Timestamp("2010-01-02 12:13:15", tz="utc"),
]
result = pd.to_datetime(
init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache
)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(
scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache
)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(Series([ts]), utc=True, cache=cache)
expected = Series([Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
data = Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
expected = Series([Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"date, dtype",
[
("2013-01-01 01:00:00", "datetime64[ns]"),
("2013-01-01 01:00:00", "datetime64[ns, UTC]"),
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
expected = Series([Timestamp("2013-01-01 01:00:00", tz="UTC")])
result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array(
[
datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2),
],
dtype=object,
)
result = pd.to_datetime(arr, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
# dtype coercion
i = DatetimeIndex(
["2000-01-01 08:00:00"],
tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),
)
assert is_datetime64_ns_dtype(i)
# tz coercion
result = pd.to_datetime(i, errors="coerce", cache=cache)
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
msg = r"dtype bool cannot be converted to datetime64\[ns\]"
with pytest.raises(TypeError, match=msg):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError, match=msg):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
msg = f"{type(cache)} is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError, match=msg):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache),
DatetimeIndex(
[to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]
),
)
def test_datetime_invalid_datatype(self):
# GH13176
msg = "is not convertible to datetime"
with pytest.raises(TypeError, match=msg):
pd.to_datetime(bool)
with pytest.raises(TypeError, match=msg):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize("value", ["a", "00:01:99"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
msg = (
"is a bad directive in format|"
"second must be in 0..59|"
"Given date string not likely a datetime"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("value", ["3000/12/11 00:00:00"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
if format is not None:
msg = "is a bad directive in format|Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = pd.to_datetime(
values, errors="ignore", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, Index(values))
res = pd.to_datetime(
values, errors="coerce", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, DatetimeIndex([pd.NaT] * len(values)))
msg = (
"is a bad directive in format|"
"Given date string not likely a datetime|"
"second must be in 0..59"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(
values, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = constructor(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike",
[
(deque([Timestamp("2010-06-02 09:30:00")] * 51)),
([Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH 29403
assert tools.should_cache(listlike) is True
def test_to_datetime_from_deque(self):
# GH 29403
result = pd.to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51))
expected = pd.to_datetime([Timestamp("2010-06-02 09:30:00")] * 51)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = "20130101 00:00:00"
result = pd.to_datetime(date, cache=True)
expected = Timestamp("20130101 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"date, format",
[
("2017-20", "%Y-%W"),
("20 Sunday", "%W %A"),
("20 Sun", "%W %a"),
("2017-21", "%Y-%U"),
("20 Sunday", "%U %A"),
("20 Sun", "%U %a"),
],
)
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(date, format=format)
def test_to_datetime_coerce(self):
# GH 26122
ts_strings = [
"March 1, 2018 12:00:00+0400",
"March 1, 2018 12:00:00+0500",
"20100240",
]
result = to_datetime(ts_strings, errors="coerce")
expected = Index(
[
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),
NaT,
]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_coerce_malformed(self):
# GH 28299
ts_strings = ["200622-12-31", "111111-24-11"]
result = to_datetime(ts_strings, errors="coerce")
expected = Index([NaT, NaT])
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT]
result = to_datetime(ts_strings)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),
NaT,
],
dtype=object,
)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex(
[Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC"
)
tm.assert_index_equal(result, expected)
def test_iso8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+12:00",
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+06:00",
"2018-11-28T00:00:00",
],
utc=True,
)
expected = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-27T12:00:00",
"2018-11-28T00:00:00",
"2018-11-27T18:00:00",
"2018-11-28T00:00:00",
],
utc=True,
)
tm.assert_index_equal(result, expected)
items = ["2018-11-28T00:00:00+12:00", "2018-11-28T00:00:00"]
result = pd.to_datetime(items, utc=True)
expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
s = Series(
[
"nan",
Timestamp("1990-01-01"),
"2015-03-14T16:15:14.123-08:00",
"2019-03-04T21:56:32.620-07:00",
None,
]
)
with pytest.raises(ValueError, match="Tz-aware datetime.datetime"):
pd.to_datetime(s)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
expected = DatetimeIndex(
[datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"ts, expected",
[
(Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")),
(
Timestamp("2018-01-01", tz="US/Pacific"),
Timestamp("2018-01-01 08:00", tz="UTC"),
),
],
)
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
@pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
def test_to_datetime_with_format_out_of_bounds(self, dt_str):
# GH 9107
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt_str, format="%Y%m%d")
def test_to_datetime_utc(self):
arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
# test proper behavior with errors
msg = "cannot specify both format and unit"
with pytest.raises(ValueError, match=msg):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]
result = to_datetime(values, unit="D", errors="ignore", cache=cache)
expected = Index(
[
11111111,
Timestamp("1970-01-02"),
Timestamp("1970-01-02"),
NaT,
NaT,
NaT,
NaT,
NaT,
],
dtype=object,
)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
)
tm.assert_index_equal(result, expected)
msg = "cannot convert input 11111111 with the unit 'D'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, unit="D", errors="raise", cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, "NaT"]
result = to_datetime(values, errors="ignore", unit="s", cache=cache)
expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors="coerce", unit="s", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"])
tm.assert_index_equal(result, expected)
msg = "cannot convert input 1420043460000 with the unit 's'"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
to_datetime(values, errors="raise", unit="s", cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ["foo", Timestamp("20130101")]:
try:
to_datetime(val, errors="raise", unit="s", cache=cache)
except tslib.OutOfBoundsDatetime as err:
raise AssertionError("incorrect exception raised") from err
except ValueError:
pass
@pytest.mark.parametrize("cache", [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp("1970-05-09 14:25:11")
result = pd.to_datetime(11111111, unit="s", errors="raise", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="coerce", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="ignore", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr1 = [1.434692e18, 1.432766e18]
arr2 = np.array(arr1).astype("int64")
for errors in ["ignore", "raise", "coerce"]:
result = pd.to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr = ["foo", 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(
["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"]
)
arr = [1.434692e18, 1.432766e18, "foo", "NaT"]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"])
arr = [Timestamp("20130101"), 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
msg = "mixed datetimes and integers in passed array"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"])
arr = [1.434692e18, 1.432766e18, Timestamp("20130101")]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_rounding(self, cache):
# GH 14156 & GH 20445: argument will incur floating point errors
# but no premature rounding
result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache)
expected = Timestamp("2015-06-19 19:55:31.877000192")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = Index([15e9] * 2, name="name")
result = pd.to_datetime(expected, errors="ignore", unit="s", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe(self, cache):
df = DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
tm.assert_series_equal(result, expected)
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")]
)
tm.assert_series_equal(result, expected2)
# unit mappings
units = [
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
)
tm.assert_series_equal(result, expected)
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
tm.assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
"cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
for c in [
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# mixed dtypes
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = "cannot assemble the datetimes: unconverted data remains: 1"
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH 23760
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = pd.to_datetime(df, utc=True)
expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = pd.to_datetime([1], unit="s", utc=True, errors="ignore")
expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
tm.assert_index_equal(result, expected)
# TODO: this is moved from tests.series.test_timeseries, may be redundant
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
+ [NaT]
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
tm.assert_series_equal(result, expected)
s = pd.concat(
[Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],
ignore_index=True,
)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D")
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
)
tm.assert_index_equal(result, expected)
msg = "non convertible value foo with the unit 'D'"
with pytest.raises(ValueError, match=msg):
to_datetime([1, 2, "foo"], unit="D")
msg = "cannot convert input 111111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime([1, 2, 111111111], unit="D")
# coerce we can process
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
)
result = to_datetime([1, 2, "foo"], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
class TestToDatetimeMisc:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime(arr)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(["20121001"], cache=cache) # bad iso 8601
exp = Timestamp("2012-10-01")
assert result[0] == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range("1/1/2000", periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors="raise", cache=cache)
result_coerce = to_datetime(s, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_apply(self, cache):
        # this is only locale-tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = pd.to_datetime(td, format="%b %y", cache=cache)
result = td.apply(pd.to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format="%b %y", errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(pd.to_datetime, format="%b %y", errors="raise", cache=cache)
expected = pd.to_datetime(td, format="%b %y", errors="coerce", cache=cache)
result = td.apply(
lambda x: pd.to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ["20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
result = to_datetime([1, "1"], errors="ignore", cache=cache)
expected = Index(np.array([1, "1"], dtype="O"))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
date_range(start="1/1/1700", freq="B", periods=100000)
@pytest.mark.parametrize("cache", [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(
["1/1/2000", "1/2/2000", np.nan, "1/4/2000, 12:34:56"], dtype=object
)
expected = np.empty(4, dtype="M8[ns]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown string format:|day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[ns]"), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = pd.NaT
else:
expected[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"dtype",
[
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
@pytest.mark.parametrize("cache", [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = pd.to_datetime(
["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache
)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize("cache", [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype="O"),
np.array([np.nan, np.nan, dt_string], dtype="O"),
np.array([dt_string, "random_string"], dtype="O"),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = Series(pd.date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(
s_as_dt_strings, format=test_format, cache=cache
)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False, cache=cache
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True, cache=cache
)
                # Whether the format is explicitly passed, inferred, or not
                # inferred at all, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
s = Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
)
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
s = Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = Series(
np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan])
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
s = Series(
np.array(
[
np.nan,
np.nan,
"01/01/2011 00:00:00",
"01/02/2011 00:00:00",
"01/03/2011 00:00:00",
]
)
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
"tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
s = Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
expected = Series(
[Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = Series(
[
Timestamp("2014-01-01"),
Timestamp("2014-02-02"),
Timestamp("2015-03-03"),
]
)
tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)
tm.assert_series_equal(
pd.to_datetime(s, format="%Y-%m-%d", cache=cache), expected
)
class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache))
assert isna(
to_datetime("2015-02-29", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-02-32", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache)
)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-32", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29"
assert (
to_datetime("2015-02-29", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-29"
)
assert (
to_datetime("2015-02-32", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-32"
)
assert (
to_datetime("2015-04-31", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-04-31"
)
class TestDatetimeParsingWrappers:
@pytest.mark.parametrize(
"date_str,expected",
list(
{
"2011-01-01": datetime(2011, 1, 1),
"2Q2005": datetime(2005, 4, 1),
"2Q05": datetime(2005, 4, 1),
"2005Q1": datetime(2005, 1, 1),
"05Q1": datetime(2005, 1, 1),
"2011Q3": datetime(2011, 7, 1),
"11Q3": datetime(2011, 7, 1),
"3Q2011": datetime(2011, 7, 1),
"3Q11": datetime(2011, 7, 1),
# quarterly without space
"2000Q4": datetime(2000, 10, 1),
"00Q4": datetime(2000, 10, 1),
"4Q2000": datetime(2000, 10, 1),
"4Q00": datetime(2000, 10, 1),
"2000q4": datetime(2000, 10, 1),
"2000-Q4": datetime(2000, 10, 1),
"00-Q4": datetime(2000, 10, 1),
"4Q-2000": datetime(2000, 10, 1),
"4Q-00": datetime(2000, 10, 1),
"00q4": datetime(2000, 10, 1),
"2005": datetime(2005, 1, 1),
"2005-11": datetime(2005, 11, 1),
"2005 11": datetime(2005, 11, 1),
"11-2005": datetime(2005, 11, 1),
"11 2005": datetime(2005, 11, 1),
"200511": datetime(2020, 5, 11),
"20051109": datetime(2005, 11, 9),
"20051109 10:15": datetime(2005, 11, 9, 10, 15),
"20051109 08H": datetime(2005, 11, 9, 8, 0),
"2005-11-09 10:15": datetime(2005, 11, 9, 10, 15),
"2005-11-09 08H": datetime(2005, 11, 9, 8, 0),
"2005/11/09 10:15": datetime(2005, 11, 9, 10, 15),
"2005/11/09 08H": datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
                # GH 10537
"2014-06": datetime(2014, 6, 1),
"06-2014": datetime(2014, 6, 1),
"2014-6": datetime(2014, 6, 1),
"6-2014": datetime(2014, 6, 1),
"20010101 12": datetime(2001, 1, 1, 12),
"20010101 1234": datetime(2001, 1, 1, 12, 34),
"20010101 123456": datetime(2001, 1, 1, 12, 34, 56),
}.items()
),
)
@pytest.mark.parametrize("cache", [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([Timestamp(expected)])
tm.assert_index_equal(res, exp)
        # these really need yearfirst support, but we don't support it yet
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[ns]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
cases = {
"10-11-12": [
(False, False, datetime(2012, 10, 11)),
(True, False, datetime(2012, 11, 10)),
(False, True, datetime(2010, 11, 12)),
(True, True, datetime(2010, 12, 11)),
],
"20/12/21": [
(False, False, datetime(2021, 12, 20)),
(True, False, datetime(2021, 12, 20)),
(False, True, datetime(2020, 12, 21)),
(True, True, datetime(2020, 12, 21)),
],
}
for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# compare with dateutil result
dateutil_result = parse(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
assert dateutil_result == expected
result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex(
[date_str], dayfirst=dayfirst, yearfirst=yearfirst
)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {
"10:15": (parse("10:15"), datetime(1, 1, 1, 10, 15)),
"9:05": (parse("9:05"), datetime(1, 1, 1, 9, 5)),
}
for date_str, (exp_now, exp_def) in cases.items():
result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string returns a time on the default date; the others
            # use the current date, and this can't be changed because it is
            # relied upon by time series plotting
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')",
),
(
"2013-01-01 05:30+0530",
pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp("1960-01-01")
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"}
if request.param == "timestamp":
return epoch_1960
elif request.param == "pydatetime":
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return pd.date_range("2014-1-1", periods=10).to_julian_date().values
class TestOrigin:
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(julian_dates, unit="D", origin="julian"))
expected = Series(
pd.to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D")
)
tm.assert_series_equal(result, expected)
result = Series(pd.to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime([0, 1, 2], unit="D"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin="julian", unit="D")
assert result.to_julian_date() == 2456658
# out-of-bounds
msg = "1 is Out of Bounds for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(1, origin="julian", unit="D")
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != "D":
msg = "unit must be 'D' for origin='julian'"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(julian_dates, unit=units, origin="julian")
def test_invalid_origin(self):
# need to have a numeric specified
msg = "it must be numeric with a unit specified"
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError, match=msg):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit="D")
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]
)
result = Series(pd.to_datetime(units_from_epochs, unit=units, origin=epochs))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"origin, exc",
[
("random_string", ValueError),
("epoch", ValueError),
("13-24-1990", ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime),
],
)
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
msg = f"origin {origin} (is Out of Bounds|cannot be converted to a Timestamp)"
with pytest.raises(exc, match=msg):
pd.to_datetime(units_from_epochs, unit=units, origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError, match="must be tz-naive"):
pd.to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit="D")
expected = Timestamp("2169-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(200 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2069-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(300 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2169-10-20 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"offset,utc,exp",
[
["Z", True, "2019-01-01T00:00:00.000Z"],
["Z", None, "2019-01-01T00:00:00.000Z"],
["-01:00", True, "2019-01-01T01:00:00.000Z"],
["-01:00", None, "2019-01-01T00:00:00.000-01:00"],
],
)
def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
expected = to_datetime([exp])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike,do_caching",
[([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],
)
def test_should_cache(listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
ser = Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = pd.to_datetime(ser, unit="ns")
expected = Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
result = pd.to_datetime(klass([nulls_fixture]))
assert result[0] is pd.NaT
def test_empty_string_datetime_coerce__format():
# GH13044
td = Series(["03/24/2016", "03/25/2016", ""])
format = "%m/%d/%Y"
# coerce empty string to pd.NaT
result = pd.to_datetime(td, format=format, errors="coerce")
expected = Series(["2016-03-24", "2016-03-25", pd.NaT], dtype="datetime64[ns]")
tm.assert_series_equal(expected, result)
# raise an exception in case a format is given
with pytest.raises(ValueError, match="does not match format"):
result = pd.to_datetime(td, format=format, errors="raise")
    # don't raise an exception in case no format is given
result = pd.to_datetime(td, errors="raise")
tm.assert_series_equal(result, expected)
def test_empty_string_datetime_coerce__unit():
# GH13044
# coerce empty string to pd.NaT
result = pd.to_datetime([1, ""], unit="s", errors="coerce")
expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]")
tm.assert_index_equal(expected, result)
# verify that no exception is raised even when errors='raise' is set
result = pd.to_datetime([1, ""], unit="s", errors="raise")
tm.assert_index_equal(expected, result)
|
bsd-3-clause
|
chenyyx/scikit-learn-doc-zh
|
examples/en/model_selection/plot_randomized_search.py
|
47
|
3287
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_)
|
gpl-3.0
|
wkerzendorf/wsynphot
|
wsynphot/io/get_filter_data.py
|
1
|
3764
|
import numpy as np
import pandas as pd
import requests
import io
from astropy import units as u
from astropy.io.votable import parse_single_table
# The VOTables fetched from SVO contain only a single table element, hence parse_single_table
FLOAT_MAX = np.finfo(np.float64).max
SVO_MAIN_URL = 'http://svo2.cab.inta-csic.es/theory/fps/fps.php'
def data_from_svo(query, error_msg='No data found for requested query'):
"""Get data in response to the query send to SVO FPS
Parameters
----------
query : dict
        Used to create an HTTP query string that is sent to SVO FPS to fetch
        data. In the dictionary, specify keys as search parameters (str) and
        values as required. The list of search parameters can be found at
http://svo2.cab.inta-csic.es/theory/fps/fps.php?FORMAT=metadata
error_msg : str, optional
        Error message to be shown in case no table element is found in the
        returned VOTable. Use this to make the error message specific to the
        query made (default is 'No data found for requested query')
Returns
-------
astropy.io.votable.tree.Table object
Table element of the VOTable fetched from SVO (in response to query)
"""
response = requests.get(SVO_MAIN_URL, params=query)
response.raise_for_status()
votable = io.BytesIO(response.content)
try:
return parse_single_table(votable)
except IndexError:
# If no table element found in VOTable
raise ValueError(error_msg)
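# Usage sketch (not part of the original module): data_from_svo accepts any of
# the SVO FPS search parameters as a raw query dict. The parameter names below
# are taken from the docstrings in this file; converting the returned table
# element with .to_table() assumes the standard astropy VOTable API.
def _example_raw_query():  # pragma: no cover - illustrative sketch only
    votable = data_from_svo(
        query={'WavelengthEff_min': 4000, 'WavelengthEff_max': 5000},
        error_msg='No filters found in the 4000-5000 Angstrom range')
    # Convert the VOTable table element into an astropy Table for easy use
    return votable.to_table()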
def get_filter_index(wavelength_eff_min=0, wavelength_eff_max=FLOAT_MAX):
"""Get master list (index) of all filters at SVO
    Optional parameters can be given to restrict the returned filters to a
    specified Wavelength Eff. range
Parameters
----------
wavelength_eff_min : float, optional
Minimum value of Wavelength Eff. (default is 0)
wavelength_eff_max : float, optional
        Maximum value of Wavelength Eff. (default is a very large number,
        FLOAT_MAX - the maximum value of np.float64)
Returns
-------
astropy.io.votable.tree.Table object
Table element of the VOTable fetched from SVO (in response to query)
"""
wavelength_eff_min = u.Quantity(wavelength_eff_min, u.angstrom)
wavelength_eff_max = u.Quantity(wavelength_eff_max, u.angstrom)
query = {'WavelengthEff_min': wavelength_eff_min.value,
'WavelengthEff_max': wavelength_eff_max.value}
error_msg = 'No filter found for requested Wavelength Eff. range'
return data_from_svo(query, error_msg)
def get_transmission_data(filter_id):
"""Get transmission data for the requested Filter ID from SVO
Parameters
----------
filter_id : str
        Filter ID in the format SVO specifies it: 'facility/instrument.filter'
Returns
-------
astropy.io.votable.tree.Table object
Table element of the VOTable fetched from SVO (in response to query)
"""
query = {'ID': filter_id}
error_msg = 'No filter found for requested Filter ID'
return data_from_svo(query, error_msg)
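# Usage sketch (not part of the original module): fetch a transmission curve by
# its SVO identifier. The ID '2MASS/2MASS.J' is only an illustrative guess in
# the 'facility/instrument.filter' format; substitute any ID present in the
# filter index. .to_table() assumes the standard astropy VOTable API.
def _example_transmission_curve():  # pragma: no cover - illustrative sketch only
    transmission = get_transmission_data('2MASS/2MASS.J')
    # The returned table typically holds wavelength/transmission columns
    return transmission.to_table()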
def get_filter_list(facility, instrument=None):
"""Get filters data for requested facilty and instrument from SVO
Parameters
----------
facility : str
        Facility for filters
instrument : str, optional
Instrument for filters (default is None).
        Leave empty if there are no instruments for the specified facility
Returns
-------
astropy.io.votable.tree.Table object
Table element of the VOTable fetched from SVO (in response to query)
"""
query = {'Facility': facility,
'Instrument': instrument}
    error_msg = 'No filter found for requested Facility (and Instrument)'
return data_from_svo(query, error_msg)
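# Usage sketch (not part of the original module): chaining the helpers above.
# The facility/instrument names are illustrative assumptions and have not been
# checked against the SVO database; .to_table() assumes the astropy VOTable API.
def _example_filter_queries():  # pragma: no cover - illustrative sketch only
    # All filters with an effective wavelength in the optical/near-IR range
    index = get_filter_index(wavelength_eff_min=3000, wavelength_eff_max=10000)
    # All filters registered for a hypothetical facility/instrument pair
    keck_filters = get_filter_list('Keck', instrument='NIRC2')
    return index.to_table(), keck_filters.to_table()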
|
bsd-3-clause
|
apache/arrow
|
python/pyarrow/tests/test_flight.py
|
3
|
68243
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import base64
import itertools
import os
import signal
import struct
import tempfile
import threading
import time
import traceback
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.lib import tobytes
from pyarrow.util import pathlib, find_free_port
from pyarrow.tests import util
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing/data")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
CRITERIA = b"the expected criteria"
def __init__(self, location=None, options=None, **kwargs):
super().__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
self.options = options
def list_flights(self, context, criteria):
if criteria == self.CRITERIA:
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table, options=self.options)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table),
options=self.options)
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
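# Note (added for clarity): MetadataFlightServer attaches a 4-byte little-endian
# batch index as app_metadata on DoGet (struct.pack('<i', idx)), and on DoPut it
# checks the client's counter from the metadata buffer and echoes its own
# counter back to the client the same way.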
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super().__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return [context.peer_identity(), context.peer().encode("utf-8")]
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super().__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
def do_put(self, context, descriptor, reader, writer):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class NeverSendsDataFlightServer(FlightServerBase):
"""A Flight server that never actually yields data."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
if ticket.ticket == b'yield_data':
# Check that the server handler will ignore empty tables
# up to a certain extent
data = [
self.schema.empty_table(),
self.schema.empty_table(),
pa.RecordBatch.from_arrays([range(5)], schema=self.schema),
]
return flight.GeneratorStream(self.schema, data)
return flight.GeneratorStream(
self.schema, itertools.repeat(self.schema.empty_table()))
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return []
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
elif action.type == "protobuf":
err_msg = b'this is an error message'
raise flight.FlightUnauthorizedError("foo", err_msg)
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class ExchangeFlightServer(FlightServerBase):
"""A server for testing DoExchange."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_exchange(self, context, descriptor, reader, writer):
if descriptor.descriptor_type != flight.DescriptorType.CMD:
raise pa.ArrowInvalid("Must provide a command descriptor")
elif descriptor.command == b"echo":
return self.exchange_echo(context, reader, writer)
elif descriptor.command == b"get":
return self.exchange_do_get(context, reader, writer)
elif descriptor.command == b"put":
return self.exchange_do_put(context, reader, writer)
elif descriptor.command == b"transform":
return self.exchange_transform(context, reader, writer)
else:
raise pa.ArrowInvalid(
"Unknown command: {}".format(descriptor.command))
def exchange_do_get(self, context, reader, writer):
"""Emulate DoGet with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
writer.begin(data.schema)
writer.write_table(data)
def exchange_do_put(self, context, reader, writer):
"""Emulate DoPut with DoExchange."""
num_batches = 0
for chunk in reader:
if not chunk.data:
raise pa.ArrowInvalid("All chunks must have data.")
num_batches += 1
writer.write_metadata(str(num_batches).encode("utf-8"))
def exchange_echo(self, context, reader, writer):
"""Run a simple echo server."""
started = False
for chunk in reader:
if not started and chunk.data:
writer.begin(chunk.data.schema, options=self.options)
started = True
if chunk.app_metadata and chunk.data:
writer.write_with_metadata(chunk.data, chunk.app_metadata)
elif chunk.app_metadata:
writer.write_metadata(chunk.app_metadata)
elif chunk.data:
writer.write_batch(chunk.data)
else:
assert False, "Should not happen"
def exchange_transform(self, context, reader, writer):
"""Sum rows in an uploaded table."""
for field in reader.schema:
if not pa.types.is_integer(field.type):
raise pa.ArrowInvalid("Invalid field: " + repr(field))
table = reader.read_all()
sums = [0] * table.num_rows
for column in table:
for row, value in enumerate(column):
sums[row] += value.as_py()
result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
writer.begin(result.schema)
writer.write_table(result)
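# Worked example (added for clarity): if the client uploads a table with integer
# columns a=[1, 2], b=[10, 20], c=[100, 200], exchange_transform streams back a
# single "sum" column [111, 222], i.e. one row-wise sum across all columns.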
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
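# Note (added for clarity): TokenServerAuthHandler issues opaque tokens of the
# form base64(b'secret:' + username). is_valid() decodes the token, checks the
# b'secret:' prefix, and returns everything after it (the username) as the
# authenticated peer identity.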
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class NoopAuthHandler(ServerAuthHandler):
"""A no-op auth handler."""
def authenticate(self, outgoing, incoming):
"""Do nothing."""
def is_valid(self, token):
"""
Returning an empty string.
Returning None causes Type error.
"""
return ""
def case_insensitive_header_lookup(headers, lookup_key):
"""Lookup the value of given key in the given headers.
The key lookup is case insensitive.
"""
for key in headers:
if key.lower() == lookup_key.lower():
return headers.get(key)
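# For example (added for clarity), both of the following calls return the same
# value, ['Bearer abc'], because keys are lower-cased on both sides:
#   case_insensitive_header_lookup({'Authorization': ['Bearer abc']}, 'authorization')
#   case_insensitive_header_lookup({'authorization': ['Bearer abc']}, 'Authorization')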
class ClientHeaderAuthMiddlewareFactory(ClientMiddlewareFactory):
"""ClientMiddlewareFactory that creates ClientAuthHeaderMiddleware."""
def __init__(self):
self.call_credential = []
def start_call(self, info):
return ClientHeaderAuthMiddleware(self)
def set_call_credential(self, call_credential):
self.call_credential = call_credential
class ClientHeaderAuthMiddleware(ClientMiddleware):
"""
ClientMiddleware that extracts the authorization header
from the server.
This is an example of a ClientMiddleware that can extract
the bearer token authorization header from an HTTP header
authentication-enabled server.
Parameters
----------
factory : ClientHeaderAuthMiddlewareFactory
This factory is used to set call credentials if an
authorization header is found in the headers from the server.
"""
def __init__(self, factory):
self.factory = factory
def received_headers(self, headers):
auth_header = case_insensitive_header_lookup(headers, 'Authorization')
self.factory.set_call_credential([
b'authorization',
auth_header[0].encode("utf-8")])
class HeaderAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Validates incoming username and password."""
def start_call(self, info, headers):
auth_header = case_insensitive_header_lookup(
headers,
'Authorization'
)
values = auth_header[0].split(' ')
token = ''
error_message = 'Invalid credentials'
if values[0] == 'Basic':
decoded = base64.b64decode(values[1])
pair = decoded.decode("utf-8").split(':')
if not (pair[0] == 'test' and pair[1] == 'password'):
raise flight.FlightUnauthenticatedError(error_message)
token = 'token1234'
elif values[0] == 'Bearer':
token = values[1]
if not token == 'token1234':
raise flight.FlightUnauthenticatedError(error_message)
else:
raise flight.FlightUnauthenticatedError(error_message)
return HeaderAuthServerMiddleware(token)
class HeaderAuthServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports incoming username and passowrd."""
def __init__(self, token):
self.token = token
def sending_headers(self):
return {'authorization': 'Bearer ' + self.token}
class HeaderAuthFlightServer(FlightServerBase):
"""A Flight server that tests with basic token authentication. """
def do_action(self, context, action):
middleware = context.get_middleware("auth")
if middleware:
auth_header = case_insensitive_header_lookup(
middleware.sending_headers(), 'Authorization')
values = auth_header.split(' ')
return [values[1].encode("utf-8")]
raise flight.FlightUnauthenticatedError(
'No token auth middleware found.')
class ArbitraryHeadersServerMiddlewareFactory(ServerMiddlewareFactory):
"""A ServerMiddlewareFactory that transports arbitrary headers."""
def start_call(self, info, headers):
return ArbitraryHeadersServerMiddleware(headers)
class ArbitraryHeadersServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports arbitrary headers."""
def __init__(self, incoming):
self.incoming = incoming
def sending_headers(self):
return self.incoming
class ArbitraryHeadersFlightServer(FlightServerBase):
"""A Flight server that tests multiple arbitrary headers."""
def do_action(self, context, action):
middleware = context.get_middleware("arbitrary-headers")
if middleware:
headers = middleware.sending_headers()
header_1 = case_insensitive_header_lookup(
headers,
'test-header-1'
)
header_2 = case_insensitive_header_lookup(
headers,
'test-header-2'
)
value1 = header_1[0].encode("utf-8")
value2 = header_2[0].encode("utf-8")
return [value1, value2]
raise flight.FlightServerError("No headers middleware found")
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return [middleware.special_value.encode()]
return [b""]
class MultiHeaderFlightServer(FlightServerBase):
"""Test sending/receiving multiple (binary-valued) headers."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
headers = repr(middleware.client_headers).encode("utf-8")
return [headers]
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info, headers):
self.methods.append(info.method)
return None
class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info):
self.methods.append(info.method)
return None
class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self):
# Read in test_middleware_multi_header below.
# The middleware instance will update this value.
self.last_headers = {}
def start_call(self, info):
return MultiHeaderClientMiddleware(self)
class MultiHeaderClientMiddleware(ClientMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
EXPECTED = {
"x-text": ["foo", "bar"],
"x-binary-bin": [b"\x00", b"\x01"],
}
def __init__(self, factory):
self.factory = factory
def sending_headers(self):
return self.EXPECTED
def received_headers(self, headers):
# Let the test code know what the last set of headers we
# received were.
self.factory.last_headers = headers
class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def start_call(self, info, headers):
return MultiHeaderServerMiddleware(headers)
class MultiHeaderServerMiddleware(ServerMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self, client_headers):
self.client_headers = client_headers
def sending_headers(self):
return MultiHeaderClientMiddleware.EXPECTED
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
global server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_list_flights():
"""Try a simple list_flights call."""
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
assert list(client.list_flights()) == []
flights = client.list_flights(ConstantFlightServer.CRITERIA)
assert len(list(flights)) == 1
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ConstantFlightServer(options=options) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
# Also test via RecordBatchReader interface
data = client.do_get(flight.Ticket(b'ints')).to_reader().read_all()
assert data.equals(table)
with pytest.raises(flight.FlightServerError,
match="expected IpcWriteOptions, got <class 'int'>"):
with ConstantFlightServer(options=42) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return self.simple_action_results
elif action.type == 'echo':
return [action.body]
elif action.type == 'bad-action':
return ['foo']
elif action.type == 'arrow-exception':
raise pa.ArrowMemoryError()
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="a bytes-like object is required"):
list(client.do_action('bad-action'))
# While Flight/C++ sends across the original status code, it
# doesn't get mapped to the equivalent code here, since we
# want to be able to distinguish between client- and server-
# side errors.
with pytest.raises(flight.FlightServerError,
match="ArrowMemoryError"):
list(client.do_action('arrow-exception'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
@pytest.mark.skipif(os.name == 'nt',
reason="ARROW-10013: gRPC on Windows corrupts peer()")
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
results = client.do_action(action)
identity = next(results)
assert identity.body.to_pybytes() == b'test'
peer_address = next(results)
assert peer_address.body.to_pybytes() != b''
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
header_auth_server_middleware_factory = HeaderAuthServerMiddlewareFactory()
no_op_auth_handler = NoopAuthHandler()
def test_authenticate_basic_token():
"""Test authenticate_basic_token with bearer token and auth headers."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
def test_authenticate_basic_token_invalid_password():
"""Test authenticate_basic_token with an invalid password."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate_basic_token(b'test', b'badpassword')
def test_authenticate_basic_token_and_action():
"""Test authenticate_basic_token and doAction after authentication."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[token_pair])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
def test_authenticate_basic_token_with_client_middleware():
"""Test authenticate_basic_token with client middleware
to intercept authorization header returned by the
HTTP header auth enabled server.
"""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client_auth_middleware = ClientHeaderAuthMiddlewareFactory()
client = FlightClient(
('localhost', server.port),
middleware=[client_auth_middleware]
)
encoded_credentials = base64.b64encode(b'test:password')
options = flight.FlightCallOptions(headers=[
(b'authorization', b'Basic ' + encoded_credentials)
])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
result2 = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result2[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
def test_arbitrary_headers_in_flight_call_options():
"""Test passing multiple arbitrary headers to the middleware."""
with ArbitraryHeadersFlightServer(
auth_handler=no_op_auth_handler,
middleware={
"auth": HeaderAuthServerMiddlewareFactory(),
"arbitrary-headers": ArbitraryHeadersServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[
token_pair,
(b'test-header-1', b'value1'),
(b'test-header-2', b'value2')
])
result = list(client.do_action(flight.Action(
"test-action", b""), options=options))
assert result[0].body.to_pybytes() == b'value1'
assert result[1].body.to_pybytes() == b'value2'
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_disable_server_verification():
"""Try a simple do_get call over TLS with server verification disabled."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
try:
client = FlightClient(('localhost', s.port),
disable_server_verification=True)
except NotImplementedError:
pytest.skip('disable_server_verification feature is not available')
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_get_metadata_v4():
"""Try a simple do_get call with V4 metadata version."""
table = pa.Table.from_arrays(
[pa.array([-10, -5, 0, 5, 10])], names=['a'])
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with MetadataFlightServer(options=options) as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
data = reader.read_all()
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
def test_flight_do_put_limit():
"""Try a simple do_put call with a size limit."""
large_batch = pa.RecordBatch.from_arrays([
pa.array(np.ones(768, dtype=np.int64())),
], names=['a'])
with EchoFlightServer() as server:
client = FlightClient(('localhost', server.port),
write_size_limit_bytes=4096)
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
large_batch.schema)
with writer:
with pytest.raises(flight.FlightWriteSizeExceededError,
match="exceeded soft limit") as excinfo:
writer.write_batch(large_batch)
assert excinfo.value.limit == 4096
smaller_batches = [
large_batch.slice(0, 384),
large_batch.slice(384),
]
for batch in smaller_batches:
writer.write_batch(batch)
expected = pa.Table.from_batches([large_batch])
actual = client.do_get(flight.Ticket(b'')).read_all()
assert expected == actual
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
def test_middleware_mapping():
"""Test that middleware records methods correctly."""
server_middleware = RecordingServerMiddlewareFactory()
client_middleware = RecordingClientMiddlewareFactory()
with FlightServerBase(middleware={"test": server_middleware}) as server:
client = FlightClient(
('localhost', server.port),
middleware=[client_middleware]
)
descriptor = flight.FlightDescriptor.for_command(b"")
with pytest.raises(NotImplementedError):
list(client.list_flights())
with pytest.raises(NotImplementedError):
client.get_flight_info(descriptor)
with pytest.raises(NotImplementedError):
client.get_schema(descriptor)
with pytest.raises(NotImplementedError):
client.do_get(flight.Ticket(b""))
with pytest.raises(NotImplementedError):
writer, _ = client.do_put(descriptor, pa.schema([]))
writer.close()
with pytest.raises(NotImplementedError):
list(client.do_action(flight.Action(b"", b"")))
with pytest.raises(NotImplementedError):
list(client.list_actions())
with pytest.raises(NotImplementedError):
writer, _ = client.do_exchange(descriptor)
writer.close()
expected = [
flight.FlightMethod.LIST_FLIGHTS,
flight.FlightMethod.GET_FLIGHT_INFO,
flight.FlightMethod.GET_SCHEMA,
flight.FlightMethod.DO_GET,
flight.FlightMethod.DO_PUT,
flight.FlightMethod.DO_ACTION,
flight.FlightMethod.LIST_ACTIONS,
flight.FlightMethod.DO_EXCHANGE,
]
assert server_middleware.methods == expected
assert client_middleware.methods == expected
def test_extra_info():
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
try:
list(client.do_action(flight.Action("protobuf", b"")))
assert False
except flight.FlightUnauthorizedError as e:
assert e.extra_info is not None
ei = e.extra_info
assert ei == b'this is an error message'
@pytest.mark.requires_testing_data
def test_mtls():
"""Test mutual TLS (mTLS) with gRPC."""
certs = example_tls_certs()
table = simple_ints_table()
with ConstantFlightServer(
tls_certificates=[certs["certificates"][0]],
verify_client=True,
root_certificates=certs["root_cert"]) as s:
client = FlightClient(
('localhost', s.port),
tls_root_certs=certs["root_cert"],
cert_chain=certs["certificates"][0].cert,
private_key=certs["certificates"][0].key)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_doexchange_get():
"""Emulate DoGet with DoExchange."""
expected = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"get")
writer, reader = client.do_exchange(descriptor)
with writer:
table = reader.read_all()
assert expected == table
def test_doexchange_put():
"""Emulate DoPut with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"put")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
writer.done_writing()
chunk = reader.read_chunk()
assert chunk.data is None
expected_buf = str(len(batches)).encode("utf-8")
assert chunk.app_metadata == expected_buf
def test_doexchange_echo():
"""Try a DoExchange echo server."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Read/write metadata before starting data.
for i in range(10):
buf = str(i).encode("utf-8")
writer.write_metadata(buf)
chunk = reader.read_chunk()
assert chunk.data is None
assert chunk.app_metadata == buf
# Now write data without metadata.
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
# And write data with metadata.
for i, batch in enumerate(batches):
buf = str(i).encode("utf-8")
writer.write_with_metadata(batch, buf)
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata == buf
def test_doexchange_echo_v4():
"""Try a DoExchange echo server using the V4 metadata version."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ExchangeFlightServer(options=options) as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Now write data without metadata.
writer.begin(data.schema, options=options)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
def test_doexchange_transform():
"""Transform a table with a service."""
data = pa.Table.from_arrays([
pa.array(range(0, 1024)),
pa.array(range(1, 1025)),
pa.array(range(2, 1026)),
], names=["a", "b", "c"])
expected = pa.Table.from_arrays([
pa.array(range(3, 1024 * 3 + 3, 3)),
], names=["sum"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"transform")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
writer.write_table(data)
writer.done_writing()
table = reader.read_all()
assert expected == table
def test_middleware_multi_header():
"""Test sending/receiving multiple (binary-valued) headers."""
with MultiHeaderFlightServer(middleware={
"test": MultiHeaderServerMiddlewareFactory(),
}) as server:
headers = MultiHeaderClientMiddlewareFactory()
client = FlightClient(('localhost', server.port), middleware=[headers])
response = next(client.do_action(flight.Action(b"", b"")))
# The server echoes the headers it got back to us.
raw_headers = response.body.to_pybytes().decode("utf-8")
client_headers = ast.literal_eval(raw_headers)
# Don't directly compare; gRPC may add headers like User-Agent.
for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
assert client_headers.get(header) == values
assert headers.last_headers.get(header) == values
@pytest.mark.requires_testing_data
def test_generic_options():
"""Test setting generic client options."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Try setting a string argument that will make requests fail
options = [("grpc.ssl_target_name_override", "fakehostname")]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
# Try setting an int argument that will make requests fail
options = [("grpc.max_receive_message_length", 32)]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(pa.ArrowInvalid):
client.do_get(flight.Ticket(b'ints'))
class CancelFlightServer(FlightServerBase):
"""A server for testing StopToken."""
def do_get(self, context, ticket):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
return flight.GeneratorStream(schema, itertools.repeat(rb))
def do_exchange(self, context, descriptor, reader, writer):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
writer.begin(schema)
while not context.is_cancelled():
writer.write_batch(rb)
time.sleep(0.5)
def test_interrupt():
if threading.current_thread().ident != threading.main_thread().ident:
pytest.skip("test only works from main Python thread")
# Skips test if not available
raise_signal = util.get_raise_signal()
def signal_from_thread():
time.sleep(0.5)
raise_signal(signal.SIGINT)
exc_types = (KeyboardInterrupt, pa.ArrowCancelled)
def test(read_all):
try:
try:
t = threading.Thread(target=signal_from_thread)
with pytest.raises(exc_types) as exc_info:
t.start()
read_all()
finally:
t.join()
except KeyboardInterrupt:
# In case KeyboardInterrupt didn't interrupt read_all
# above, at least prevent it from stopping the test suite
pytest.fail("KeyboardInterrupt didn't interrupt Flight read_all")
e = exc_info.value.__context__
assert isinstance(e, pa.ArrowCancelled) or \
isinstance(e, KeyboardInterrupt)
with CancelFlightServer() as server:
client = FlightClient(("localhost", server.port))
reader = client.do_get(flight.Ticket(b""))
test(reader.read_all)
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
test(reader.read_all)
def test_never_sends_data():
# Regression test for ARROW-12779
match = "application server implementation error"
with NeverSendsDataFlightServer() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(flight.FlightServerError, match=match):
client.do_get(flight.Ticket(b'')).read_all()
# Check that the server handler will ignore empty tables
# up to a certain extent
table = client.do_get(flight.Ticket(b'yield_data')).read_all()
assert table.num_rows == 5
|
apache-2.0
|
sgenoud/scikit-learn
|
sklearn/ensemble/forest.py
|
1
|
40721
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
  forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe, Brian Holt
# License: BSD 3
import itertools
import numpy as np
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..feature_selection.selector_mixin import SelectorMixin
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor, \
ExtraTreeClassifier, ExtraTreeRegressor
from ..utils import check_random_state
from ..metrics import r2_score
from .base import BaseEnsemble
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(n_trees, forest, X, y,
sample_mask, X_argsorted, seed, verbose):
"""Private function used to build a batch of trees within a job."""
random_state = check_random_state(seed)
trees = []
for i in xrange(n_trees):
if verbose > 1:
print("building tree %d of %d" % (i + 1, n_trees))
seed = random_state.randint(MAX_INT)
tree = forest._make_estimator(append=False)
tree.set_params(compute_importances=forest.compute_importances)
tree.set_params(random_state=check_random_state(seed))
if forest.bootstrap:
n_samples = X.shape[0]
indices = random_state.randint(0, n_samples, n_samples)
tree.fit(X[indices], y[indices],
sample_mask=sample_mask, X_argsorted=X_argsorted)
tree.indices_ = indices
else:
tree.fit(X, y,
sample_mask=sample_mask, X_argsorted=X_argsorted)
trees.append(tree)
return trees
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
"""Private function used to compute a batch of predictions within a job."""
n_samples = X.shape[0]
p = []
for k in xrange(n_outputs):
p.append(np.zeros((n_samples, n_classes[k])))
for tree in trees:
p_tree = tree.predict_proba(X)
if n_outputs == 1:
p_tree = [p_tree]
for k in xrange(n_outputs):
if n_classes[k] == tree.n_classes_[k]:
p[k] += p_tree[k]
else:
for j, c in enumerate(tree.classes_[k]):
p[k][:, c] += p_tree[k][:, j]
return p
def _parallel_predict_regression(trees, X):
"""Private function used to compute a batch of predictions within a job."""
return sum(tree.predict(X) for tree in trees)
def _partition_trees(forest):
"""Private function used to partition trees between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), forest.n_estimators)
else:
n_jobs = min(forest.n_jobs, forest.n_estimators)
# Partition trees between jobs
n_trees = [int(forest.n_estimators / n_jobs)] * n_jobs
for i in xrange(forest.n_estimators % n_jobs):
n_trees[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_trees[i - 1]
return n_jobs, n_trees, starts
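# Worked example (illustrative): with forest.n_estimators == 10 and
# forest.n_jobs == 3, this yields n_trees == [4, 3, 3] and
# starts == [0, 4, 7, 10].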
def _parallel_X_argsort(X):
"""Private function used to sort the features of X."""
return np.asarray(np.argsort(X.T, axis=1).T, dtype=np.int32, order="F")
def _partition_features(forest, n_total_features):
"""Private function used to partition features between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), n_total_features)
else:
n_jobs = min(forest.n_jobs, n_total_features)
# Partition features between jobs
n_features = [n_total_features / n_jobs] * n_jobs
for i in xrange(n_total_features % n_jobs):
n_features[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_features[i - 1]
return n_jobs, n_features, starts
class BaseForest(BaseEnsemble, SelectorMixin):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, base_estimator,
n_estimators=10,
estimator_params=[],
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.compute_importances = compute_importances
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = check_random_state(random_state)
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.feature_importances_ = None
self.verbose = verbose
def fit(self, X, y):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Returns
-------
self : object
Returns self.
"""
# Precompute some data
X = np.atleast_2d(X)
n_samples, self.n_features_ = X.shape
if self.bootstrap:
sample_mask = None
X_argsorted = None
else:
if self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_jobs, _, starts = _partition_features(self, self.n_features_)
all_X_argsorted = Parallel(n_jobs=n_jobs)(
delayed(_parallel_X_argsort)(
X[:, starts[i]:starts[i + 1]])
for i in xrange(n_jobs))
X_argsorted = np.asfortranarray(np.hstack(all_X_argsorted))
y = np.copy(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = y[:, np.newaxis]
self.classes_ = []
self.n_classes_ = []
self.n_outputs_ = y.shape[1]
if isinstance(self.base_estimator, ClassifierMixin):
for k in xrange(self.n_outputs_):
unique = np.unique(y[:, k])
self.classes_.append(unique)
self.n_classes_.append(unique.shape[0])
y[:, k] = np.searchsorted(unique, y[:, k])
# Assign chunk of trees to jobs
n_jobs, n_trees, _ = _partition_trees(self)
# Parallel loop
all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_build_trees)(
n_trees[i],
self,
X,
y,
sample_mask,
X_argsorted,
self.random_state.randint(MAX_INT),
verbose=self.verbose)
for i in xrange(n_jobs))
# Reduce
self.estimators_ = [tree for tree in itertools.chain(*all_trees)]
# Calculate out of bag predictions and score
if self.oob_score:
if isinstance(self, ClassifierMixin):
self.oob_decision_function_ = []
self.oob_score_ = 0.0
predictions = []
for k in xrange(self.n_outputs_):
predictions.append(np.zeros((n_samples,
self.n_classes_[k])))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict_proba(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in xrange(self.n_outputs_):
predictions[k][mask, :] += p_estimator[k]
for k in xrange(self.n_outputs_):
decision = predictions[k] \
/ predictions[k].sum(axis=1)[:, np.newaxis]
self.oob_decision_function_.append(decision)
self.oob_score_ += np.mean(y[:, k] \
== np.argmax(predictions[k], axis=1))
if self.n_outputs_ == 1:
self.oob_decision_function_ = \
self.oob_decision_function_[0]
self.oob_score_ /= self.n_outputs_
else:
# Regression:
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[mask, :] += p_estimator
n_predictions[mask, :] += 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in xrange(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k], predictions[:, k])
self.oob_score_ /= self.n_outputs_
# Sum the importances
if self.compute_importances:
self.feature_importances_ = \
sum(tree.feature_importances_ for tree in self.estimators_) \
/ self.n_estimators
return self
class ForestClassifier(BaseForest, ClassifierMixin):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, base_estimator,
n_estimators=10,
estimator_params=[],
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the majority
prediction of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
n_samples = len(X)
P = self.predict_proba(X)
if self.n_outputs_ == 1:
P = [P]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(P[k], axis=1),
axis=0)
if self.n_outputs_ == 1:
predictions = predictions.reshape((n_samples, ))
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are
            ordered arithmetically.
"""
# Check data
X = np.atleast_2d(X)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_p = Parallel(n_jobs=n_jobs)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
X,
self.n_classes_,
self.n_outputs_)
for i in xrange(n_jobs))
# Reduce
p = all_p[0]
for j in xrange(1, self.n_jobs):
for k in xrange(self.n_outputs_):
p[k] += all_p[j][k]
for k in xrange(self.n_outputs_):
p[k] /= self.n_estimators
if self.n_outputs_ == 1:
return p[0]
else:
return p
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the mean predicted class log-probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
            ordered arithmetically.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(BaseForest, RegressorMixin):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, base_estimator,
n_estimators=10,
estimator_params=[],
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = np.atleast_2d(X)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]], X)
for i in xrange(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
    A random forest is a meta estimator that fits a number of classical
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features` on regression
problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self, n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density", "max_features",
"random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of classical
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self, n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density", "max_features",
"random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split.
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self, n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density", "max_features",
"random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self, n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density", "max_features",
"random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
|
bsd-3-clause
|
ntvis/tushare
|
tushare/datayes/options.py
|
17
|
1613
|
# -*- coding:utf-8 -*-
"""
DataYes (通联数据) API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Options():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Opt(self, contractStatus='', optID='', secID='', ticker='', varSecID='', varticker='', field=''):
"""
        Get option contract codes, trading codes, exchange markets, underlyings and other related information.
"""
code, result = self.client.getData(vs.OPT%(contractStatus, optID, secID, ticker,
varSecID, varticker, field))
return _ret_data(code, result)
def OptVar(self, exchangeCD='', secID='', ticker='', contractType='', exerType='', field=''):
"""
        Get option product names, effective dates, exercise styles, settlement methods, quotation units and other related information.
"""
code, result = self.client.getData(vs.OPTVAR%(exchangeCD, secID, ticker,
contractType, exerType, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
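# A minimal usage sketch (illustrative only; it assumes a valid DataYes token
# has already been stored via tushare.util.upass, and '510050' is a purely
# hypothetical ticker):
#
#     from tushare.datayes.options import Options
#     opt = Options()
#     df = opt.Opt(ticker='510050')
#     print(df.head())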
|
bsd-3-clause
|
sannecottaar/burnman
|
examples/example_gibbs_modifiers.py
|
4
|
7846
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_gibbs_modifiers
-----------------------
This example script demonstrates the modifications to
the Gibbs free energy (and derivatives) that can be applied
as masks over the results from the equations of state.
These modifications currently take the forms:
- Landau corrections (implementations of Putnis (1992)
  and Holland and Powell (2011))
- Bragg-Williams corrections
(implementation of Holland and Powell (1996))
- Linear (a simple delta_E + delta_V*P - delta_S*T)
- Magnetic (Chin, Hertzman and Sundman (1987))
*Uses:*
* :doc:`mineral_database`
*Demonstrates:*
* creating a mineral with excess contributions
* calculating thermodynamic properties
"""
from __future__ import absolute_import
# Here we import standard python modules that are required for
# usage of BurnMan. In particular, numpy is used for handling
# numerical arrays and mathematical operations on them, and
# matplotlib is used for generating plots of results of calculations
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
# Here we import the relevant modules from BurnMan. The burnman
# module imports several of the most important functionalities of
# the library, including the ability to make composites, and compute
# thermoelastic properties of them. The minerals module includes
# the mineral physical parameters for the predefined minerals in
# BurnMan
import burnman
from burnman import minerals
if __name__ == "__main__":
# Here we show the interesting features of Landau transitions
# First, lets look at the P wave velocity in stishovite as it transforms
# to the CaCl2 structure at high pressure
stv = minerals.SLB_2011.stishovite()
T = 1500.
pressures = np.linspace(60.e9, 80.e9, 101)
v_ps = np.empty_like(pressures)
for i, P in enumerate(pressures):
stv.set_state(P, T)
v_ps[i] = stv.v_p
plt.plot(pressures / 1.e9, v_ps / 1.e3, label='stishovite')
plt.xlabel('P (GPa)')
plt.ylabel('V_p (km/s)')
plt.legend(loc="lower right")
plt.show()
# Landau transitions also cause spikes in heat capacity
# Here we show an example of troilite, as implemented by
# Evans et al. (2010) and incorporated into the dataset
# of Holland and Powell (2011)
# Here we show you how to create a mineral with a
# Landau transition.
# A special feature of burnman is that you can have
# more than one Landau (or any other type of)
# contribution.
# Here's a copy of lot (low-temperature troilite) from
# Holland and Powell (2011), with the Landau transition
# of tro also included.
from burnman.processchemistry import dictionarize_formula, formula_mass
class lot (burnman.Mineral):
def __init__(self):
formula = 'Fe1.0S1.0'
formula = dictionarize_formula(formula)
self.params = {
'name': 'lot',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -102160.0,
'S_0': 60.0,
'V_0': 1.818e-05,
'Cp': [50.2, 0.011052, -940000.0, 0.0],
'a_0': 4.93e-05,
'K_0': 65800000000.0,
'Kprime_0': 4.17,
'Kdprime_0': -6.3e-11,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula)}
self.property_modifiers = [
['landau_hp', {'P_0': 100000.0,
'T_0': 298.15,
'Tc_0': 420.0,
'S_D': 10.0,
'V_D': 0.0}],
['landau_hp', {'P_0': 100000.0,
'T_0': 298.15,
'Tc_0': 598.0,
'S_D': 12.0,
'V_D': 4.1e-7}]]
burnman.Mineral.__init__(self)
troilite = lot()
lot = minerals.HP_2011_ds62.lot()
tro = minerals.HP_2011_ds62.tro()
P = 1.e5
temperatures = np.linspace(300., 1300., 101)
C_ps_troilite = np.empty_like(temperatures)
C_ps_lot = np.empty_like(temperatures)
C_ps_tro = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
troilite.set_state(P, T)
C_ps_troilite[i] = troilite.C_p
lot.set_state(P, T)
C_ps_lot[i] = lot.C_p
tro.set_state(P, T)
C_ps_tro[i] = tro.C_p
plt.plot(temperatures, C_ps_lot, 'r--', label='low temperature (HP2011)')
plt.plot(temperatures, C_ps_tro, 'g--', label='high temperature (HP2011)')
plt.plot(temperatures, C_ps_troilite, 'b-', label='troilite')
plt.xlabel('T (K)')
plt.ylabel('C_p (J/K/mol)')
plt.legend(loc="lower right")
plt.show()
# Spinel is a mineral with a Bragg-Williams type model
sp = minerals.HP_2011_ds62.sp()
P = 1.e5
temperatures = np.linspace(300., 1300., 101)
C_ps = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
sp.set_state(P, T)
C_ps[i] = sp.C_p
# print sp._property_modifiers
plt.plot(temperatures, C_ps, label='spinel')
plt.xlabel('T (K)')
plt.ylabel('C_p (J/K/mol)')
plt.legend(loc="lower right")
plt.show()
# Wuestite has a Landau-type transition at low temperature,
# but we could also choose to simplify things by just having an excess entropy
# to estimate the thermal properties at high temperature
# Here we ignore the 0 Pa, 0 K gibbs and volume contributions, as the endmember
# properties would need refitting too...
class wuestite (burnman.Mineral):
def __init__(self):
formula = 'FeO'
formula = dictionarize_formula(formula)
self.params = {
'name': 'Wuestite',
'formula': formula,
'equation_of_state': 'slb3',
'F_0': -242000.0,
'V_0': 1.226e-05,
'K_0': 1.79e+11,
'Kprime_0': 4.9,
'Debye_0': 454.0,
'grueneisen_0': 1.53,
'q_0': 1.7,
'G_0': 59000000000.0,
'Gprime_0': 1.4,
'eta_s_0': -0.1,
'n': sum(formula.values()),
'molar_mass': formula_mass(formula)}
self.property_modifiers = [
['linear', {'delta_E': 0., 'delta_S': 12., 'delta_V': 0.}]]
self.uncertainties = {
'err_F_0': 1000.0,
'err_V_0': 0.0,
'err_K_0': 1000000000.0,
'err_K_prime_0': 0.2,
'err_Debye_0': 21.0,
'err_grueneisen_0': 0.13,
'err_q_0': 1.0,
'err_G_0': 1000000000.0,
'err_Gprime_0': 0.1,
'err_eta_s_0': 1.0}
burnman.Mineral.__init__(self)
wus = wuestite()
wus_HP = burnman.minerals.HP_2011_ds62.fper()
P = 1.e5
temperatures = np.linspace(300., 1300., 101)
Ss = np.empty_like(temperatures)
Ss_HP = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
wus.set_state(P, T)
Ss[i] = wus.S
wus_HP.set_state(P, T)
Ss_HP[i] = wus_HP.S
plt.plot(temperatures, Ss, label='linear')
plt.plot(temperatures, Ss_HP, label='HP_2011_ds62')
plt.xlabel('T (K)')
plt.ylabel('S (J/K/mol)')
plt.legend(loc="lower right")
plt.show()
|
gpl-2.0
|
yl565/statsmodels
|
examples/python/tsa_dates.py
|
29
|
1169
|
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
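# Illustrative check of the end-of-year convention mentioned above:
# dates[0] should be datetime.datetime(1700, 12, 31, 0, 0)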
# ## Using Pandas
#
# Make a pandas Series (formerly TimeSeries) or DataFrame
endog = pd.Series(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
|
bsd-3-clause
|
joernleu/Pythorient
|
src/point_densities.py
|
1
|
2755
|
# gaussian filter
import numpy as np
import scipy.ndimage as ndi
import matplotlib.pyplot as plt
import time
def grid_density_gaussian_filter(x0, y0, x1, y1, w, h, data):
kx = (w - 1) / (x1 - x0)
ky = (h - 1) / (y1 - y0)
r = 20
border = r
imgw = (w + 2 * border)
imgh = (h + 2 * border)
img = np.zeros((imgh,imgw))
for x, y in data:
ix = int((x - x0) * kx) + border
iy = int((y - y0) * ky) + border
if 0 <= ix < imgw and 0 <= iy < imgh:
img[iy][ix] += 1
return ndi.gaussian_filter(img, (r,r)) ## gaussian convolution
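# A minimal usage sketch (illustrative only; random sample points are assumed):
#
#     pts = np.random.uniform(-1., 1., (500, 2))
#     zd = grid_density_gaussian_filter(-1., -1., 1., 1., 128, 128, pts)
#     # zd has shape (128 + 2*20, 128 + 2*20): smoothed point counts padded
#     # by the border of r = 20 pixels used above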
def boxsum(img, w, h, r):
st = [0] * (w+1) * (h+1)
for x in xrange(w):
st[x+1] = st[x] + img[x]
for y in xrange(h):
st[(y+1)*(w+1)] = st[y*(w+1)] + img[y*w]
for x in xrange(w):
st[(y+1)*(w+1)+(x+1)] = st[(y+1)*(w+1)+x] + st[y*(w+1)+(x+1)] - st[y*(w+1)+x] + img[y*w+x]
for y in xrange(h):
y0 = max(0, y - r)
y1 = min(h, y + r + 1)
for x in xrange(w):
x0 = max(0, x - r)
x1 = min(w, x + r + 1)
img[y*w+x] = st[y0*(w+1)+x0] + st[y1*(w+1)+x1] - st[y1*(w+1)+x0] - st[y0*(w+1)+x1]
def grid_density_boxsum(x0, y0, x1, y1, w, h, data):
kx = (w - 1) / (x1 - x0)
ky = (h - 1) / (y1 - y0)
r = 15
border = r * 2
imgw = (w + 2 * border)
imgh = (h + 2 * border)
img = [0] * (imgw * imgh)
for x, y in data:
ix = int((x - x0) * kx) + border
iy = int((y - y0) * ky) + border
if 0 <= ix < imgw and 0 <= iy < imgh:
img[iy * imgw + ix] += 1
for p in xrange(4):
boxsum(img, imgw, imgh, r)
a = np.array(img).reshape(imgh,imgw)
b = a[border:(border+h),border:(border+w)]
return b
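# Note: grid_density_boxsum approximates a Gaussian blur by applying the
# box-sum filter four times in a row; repeated box filtering converges
# towards a Gaussian response, which is why the p-loop above calls boxsum()
# four times.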
if __name__ == '__main__':
n = 1000
# data points range
data_ymin = -2.
data_ymax = 2.
data_xmin = -2.
data_xmax = 2.
# view area range
view_ymin = -.5
view_ymax = .5
view_xmin = -.5
view_xmax = .5
# generate data
xl = np.random.uniform(data_xmin, data_xmax, n)
yl = np.random.uniform(data_ymin, data_ymax, n)
zl = np.random.uniform(0, 1, n)
# get visible data points
xlvis = []
ylvis = []
for i in range(0,len(xl)):
if view_xmin < xl[i] < view_xmax and view_ymin < yl[i] < view_ymax:
xlvis.append(xl[i])
ylvis.append(yl[i])
fig = plt.figure()
t0 = time.clock()
zd = grid_density_gaussian_filter(view_xmin, view_ymin, view_xmax, view_ymax, 256, 256, zip(xl, yl))
#plt.title('ndi.gaussian_filter - '+str(time.clock()-t0)+"sec")
plt.imshow(zd , origin='lower', extent=[view_xmin, view_xmax, view_ymin, view_ymax])
plt.scatter(xlvis, ylvis)
plt.show()
|
mit
|
Socrats/EvoSim
|
evosim/gui.py
|
1
|
1123
|
# ==============================================================================
# EvoSim
# Copyright © 2016 Elias F. Domingos. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import matplotlib.pyplot as plt
import numpy as np
plt.ion() # Note this correction
fig = plt.figure()
plt.axis([0, 1000, 0, 1])
i = 0
x = list()
y = list()
while i < 1000:
temp_y = np.random.random()
x.append(i)
y.append(temp_y)
plt.scatter(i, temp_y)
i += 1
plt.show()
plt.pause(0.0001) # Note this correction
|
apache-2.0
|
dialounke/pylayers
|
pylayers/antprop/coeffModel.py
|
2
|
7181
|
"""
.. currentmodule:: pylayers.antprop.coeffModel
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
import doctest
import pdb
import numpy as np
import scipy as sp
import scipy.special as special
import matplotlib.pylab as plt
from numpy import zeros
def relative_error(Eth_original, Eph_original,Eth_model, Eph_model,theta, phi, dsf=1,kf=-1):
""" calculate relative error between original and model
Parameters
----------
Eth_original : np.array
Eph_original : np.array
Eth_model : np.array
Eph_model : np.array
theta : np.array
    phi : np.array
dsf : int
down sampling factor
kf : int
"""
st = np.sin(theta).reshape((len(theta), 1))
#
# Construct difference between reference and reconstructed
#
if kf!=-1:
dTh = (Eth_model[kf, :, :] - Eth_original[kf, ::dsf, ::dsf])
dPh = (Eph_model[kf, :, :] - Eph_original[kf, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(Eth_original[kf, ::dsf, ::dsf] \
* np.conj(Eth_original[kf, ::dsf, ::dsf])) * st
vPh2 = np.real(Eph_original[kf, ::dsf, ::dsf] \
* np.conj(Eph_original[kf, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
else:
dTh = (Eth_model[:, :, :] - Eth_original[:, ::dsf, ::dsf])
dPh = (Eph_model[:, :, :] - Eph_original[:, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(Eth_original[:, ::dsf, ::dsf] \
* np.conj(Eth_original[:, ::dsf, ::dsf])) * st
vPh2 = np.real(Eph_original[:, ::dsf, ::dsf] \
* np.conj(Eph_original[:, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
errelTh = (errTh / mvTh2)
errelPh = (errPh / mvPh2)
errel =( (errTh + errPh) / (mvTh2 + mvPh2))
return(errelTh, errelPh, errel)
def RepAzimuth1 (Ec, theta, phi, th= np.pi/2,typ = 'Gain'):
""" response in azimuth
Parameters
----------
Ec
theta :
phi :
th :
typ : string
'Gain'
"""
pos_th = np.where(theta == th)[0][0]
start = pos_th*len(phi)
stop = start + len(phi)
if typ=='Gain':
V = np.sqrt(np.real(Ec[0,:,start:stop]*
np.conj(Ec[0,:,start:stop]) +
Ec[1,:,start:stop]*np.conj(Ec[1,:,start:stop]) +
Ec[2,:,start:stop]*np.conj(Ec[2,:,start:stop])))
if typ=='Ex':
V = np.abs(Ec[0,:,start:stop])
if typ=='Ey':
V = np.abs(Ec[1,:,start:stop])
if typ=='Ez':
V = np.abs(Ec[2,:,start:stop])
VdB = 20*np.log10(V)
VdBmin = -40
VdB = VdB - VdBmin
V = VdB
#plt.polar(phi,V)
#plt.title('theta = '+str(th))
return V
def mode_energy(C,M,L =20, ifreq = 46):
""" calculates mode energy
Parameters
----------
C :
M :
L : int
ifreq : int
    C has shape (3, Nfreq, Ncoef) with Ncoef = (1+L)**2
"""
Em = []
Lc = (1+L)**2
for m in range(M+1):
im = m*(2*L+3-m)/2
bind = (1+L)*(L+2)/2 + im-L-1
if ifreq > 0:
if m == 0:
em = np.sum(np.abs(C[:,ifreq,im:im+L-m+1])**2)
else:
em = np.sum(np.abs(C[:,ifreq,im:im+L-m+1])**2) + np.sum(np.abs(C[:,ifreq,bind:bind + L-m+1])**2)
Et = np.sum(np.abs(C[:,ifreq,:])**2)
Em.append(em)
return np.array(Em)/Et
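# A minimal usage sketch (illustrative only; random coefficients with the
# expected shape (3, Nfreq, (1+L)**2) are assumed, and the integer-division
# indexing above relies on Python 2 semantics):
#
#     L = 20
#     C = np.random.randn(3, 50, (1+L)**2) + 1j*np.random.randn(3, 50, (1+L)**2)
#     Em = mode_energy(C, M=L, L=L, ifreq=46)
#     # Em[m] is the fraction of the total energy carried by mode m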
def mode_energy2(A,m, ifreq=46, L= 20):
""" calculates mode energy (version 2)
Parameters
----------
A :
m :
ifreq
L :
"""
cx = lmreshape(A.S.Cx.s2)
cy = lmreshape(A.S.Cy.s2)
cz = lmreshape(A.S.Cz.s2)
if ifreq >0:
em = np.sum(np.abs(cx[ifreq,:,L+m])**2+np.abs(cy[ifreq,:,L+m])**2+np.abs(cz[ifreq,:,L+m])**2)
Et = np.sum(np.abs(cx[ifreq])**2+np.abs(cy[ifreq])**2+np.abs(cz[ifreq])**2)
return em/Et
def level_energy(A,l, ifreq = 46,L=20):
""" calculates energy of the level l
Parameters
----------
A : Antenna
l : int
level
ifreq
L
"""
cx = lmreshape(A.S.Cx.s2)
cy = lmreshape(A.S.Cy.s2)
cz = lmreshape(A.S.Cz.s2)
if ifreq >0:
el = np.sum(np.abs(cx[ifreq,l,:])**2+np.abs(cy[ifreq,l,:])**2+np.abs(cz[ifreq,l,:])**2)
Et = np.sum(np.abs(cx[ifreq])**2+np.abs(cy[ifreq])**2+np.abs(cz[ifreq])**2)
return el/Et
def modeMax(coeff,L= 20, ifreq = 46):
""" calculates maximal mode
Parameters
----------
coeff :
L : int
maximum level
ifreq : int
"""
Em_dB = 20*np.log10(mode_energy(C = coeff,M = L))
    max_mode = np.where(Em_dB < -20)[0][0] - 1
return max_mode
def lmreshape(coeff,L= 20):
""" level and mode reshaping
Parameters
----------
coeff
L : int
maximum level
"""
sh = coeff.shape
coeff_lm = zeros(shape = (sh[0],1+L, 1+2*L), dtype = complex )
for m in range(0,1+L):
im = m*(2*L+3-m)/2
coeff_lm[:,m:L+1,L+m] = coeff[:,im:im +L+1-m]
for m in range(1,L):
im = m*(2*L+3-m)/2
bind = (1+L)*(L+2)/2 + im-L-1
coeff_lm[:,m:L+1,L-m]= coeff[:,bind: bind + L-m+1]
return coeff_lm
def sshModel(c,d, L = 20):
""" calculates sshModel
Parameters
----------
c : ssh coeff
free space antenna coeff
d : float
distance (meters)
L : int
Returns
-------
cm : ssh coeff
perturbed antenna coeff
"""
Lc = (1+L)**2
sh = np.shape(c)
cm = np.zeros(shape = sh , dtype = complex)
m0 = modeMax(c, L= 20, ifreq = 46)
im0 = m0*(2*L+3-m0)/2
M = m0 + int(0.06*d) + 4
a0 = 0.002*d+0.55
am = -0.002*d + 1.55
alpha = -0.006*d+1.22
for m in range(0,m0):
im = m*(2*L+3-m)/2
if m == 0:
dephm = 0
cm[:,:,im: im+L+1-m] = a0*c[:,:,im: im+L+1-m]
else:
dephm = (m-m0)*alpha
cm[:,:,im: im+L+1-m] = a0*c[:,:,im: im+L+1-m]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im-L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*cm[:,:,im: im+L+1-m]
for m in range(m0,M+1):
dephm = (m-m0)*alpha
if m == m0:
im = m*(2*L+3-m)/2
cm[:,:,im: im+L+1-m] = (am/(m-m0+1))*c[:,:,im0 : im0+L-m+1]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im -L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*(cm[:,:,im: im+L+1-m])
else:
im = m*(2*L+3-m)/2
cm[:,:,im: im+L+1-m] = (am/(m-m0+1))*c[:,:,im0 : im0+L-m+1]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im -L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*(cm[:,:,im: im+L+1-m])
cm[0:2] = c[0:2]
return cm
if (__name__=="__main__"):
doctest.testmod()
|
mit
|
Hiyorimi/scikit-image
|
doc/examples/features_detection/plot_multiblock_local_binary_pattern.py
|
9
|
2603
|
"""
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) blocks, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image
# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50
# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)
######################################################################
# Now let's apply the operator to a real image and see how the visualization
# works.
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp
test_img = data.coins()
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
lbp_code=lbp_code, alpha=0.5)
plt.imshow(img, interpolation='nearest')
plt.show()
######################################################################
# On the above plot we see the result of computing a MB-LBP and a
# visualization of the computed feature. The rectangles whose sum of
# intensities is smaller than that of the central rectangle are marked in
# cyan. The ones with a higher sum are marked in white. The central
# rectangle is left untouched.
|
bsd-3-clause
|
balazs-bamer/FreeCAD-Surface
|
src/Mod/Plot/Plot.py
|
1
|
12232
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD
import PySide
from PySide import QtCore, QtGui
try:
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4']='PySide'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
except ImportError:
msg = PySide.QtGui.QApplication.translate(
"plot_console",
"matplotlib not found, so Plot module can not be loaded",
None,
PySide.QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintMessage(msg + '\n')
raise ImportError("matplotlib not installed")
def getMainWindow():
""" Return the FreeCAD main window. """
toplevel = PySide.QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
return None
def getMdiArea():
""" Return FreeCAD MdiArea. """
mw = getMainWindow()
if not mw:
return None
childs = mw.children()
for c in childs:
if isinstance(c, PySide.QtGui.QMdiArea):
return c
return None
def getPlot():
""" Return the selected Plot document if exist. """
# Get active tab
mdi = getMdiArea()
if not mdi:
return None
sub = mdi.activeSubWindow()
if not sub:
return None
    # Explore children looking for the Plot class
for i in sub.children():
if i.metaObject().className() == "Plot":
return i
return None
def closePlot():
""" closePlot(): Close the active plot window. """
# Get active tab
mdi = getMdiArea()
if not mdi:
return None
sub = mdi.activeSubWindow()
if not sub:
return None
    # Explore children looking for the Plot class
for i in sub.children():
if i.metaObject().className() == "Plot":
sub.close()
def figure(winTitle="plot"):
"""Create a new plot subwindow/tab.
Keyword arguments:
winTitle -- Plot tab title.
"""
mdi = getMdiArea()
if not mdi:
return None
win = Plot(winTitle)
sub = mdi.addSubWindow(win)
sub.show()
return win
def plot(x, y, name=None):
"""Plots a new serie (as line plot)
Keyword arguments:
x -- X values
y -- Y values
name -- Data serie name (for legend).
"""
# Get active plot, or create another one if don't exist
plt = getPlot()
if not plt:
plt = figure()
# Call to plot
return plt.plot(x, y, name)
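# A minimal usage sketch (illustrative only; it has to be run inside FreeCAD
# with this Plot module importable):
#
#     import Plot
#     Plot.figure("demo")
#     Plot.plot([0, 1, 2, 3], [0, 1, 4, 9], name="x^2")
#     Plot.legend(True)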
def series():
"""Return all the lines from a selected plot."""
plt = getPlot()
if not plt:
return []
return plt.series
def removeSerie(index):
"""Remove a data serie from the active plot.
Keyword arguments:
index -- Index of the serie to remove.
"""
# Get active series
plt = getPlot()
if not plt:
return
plots = plt.series
if not plots:
return
# Remove line from plot
axes = plots[index].axes
axes.lines.pop(plots[index].lid)
# Remove serie from list
del plt.series[index]
# Update GUI
plt.update()
def legend(status=True, pos=None, fontsize=None):
"""Show/Hide the legend from the active plot.
Keyword arguments:
status -- True if legend must be shown, False otherwise.
pos -- Legend position.
fontsize -- Font size
"""
plt = getPlot()
if not plt:
return
plt.legend = status
if fontsize:
plt.legSiz = fontsize
# Hide all legends
for axes in plt.axesList:
axes.legend_ = None
# Legend must be activated on last axes
axes = plt.axesList[-1]
if status:
# Setup legend handles and names
lines = series()
handles = []
names = []
for l in lines:
if l.name is not None:
handles.append(l.line)
names.append(l.name)
# Show the legend (at selected position or at best)
if pos:
l = axes.legend(handles, names, bbox_to_anchor=pos)
plt.legPos = pos
else:
l = axes.legend(handles, names, loc='best')
# Update canvas in order to compute legend data
plt.canvas.draw()
# Get resultant position
fax = axes.get_frame().get_extents()
fl = l.get_frame()
plt.legPos = (
(fl._x + fl._width - fax.x0) / fax.width,
(fl._y + fl._height - fax.y0) / fax.height)
# Set fontsize
for t in l.get_texts():
t.set_fontsize(plt.legSiz)
plt.update()
def grid(status=True):
"""Show/Hide the grid from the active plot.
Keyword arguments:
status -- True if grid must be shown, False otherwise.
"""
plt = getPlot()
if not plt:
return
plt.grid = status
axes = plt.axes
axes.grid(status)
plt.update()
def title(string):
"""Setup the plot title.
Keyword arguments:
string -- Plot title.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_title(string)
plt.update()
def xlabel(string):
"""Setup the x label.
Keyword arguments:
string -- Title to set.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_xlabel(string)
plt.update()
def ylabel(string):
"""Setup the y label.
Keyword arguments:
string -- Title to set.
"""
plt = getPlot()
if not plt:
return
axes = plt.axes
axes.set_ylabel(string)
plt.update()
def axesList():
"""Return the plot axes sets list. """
plt = getPlot()
if not plt:
return []
return plt.axesList
def axes():
"""Return the active plot axes."""
plt = getPlot()
if not plt:
return None
return plt.axes
def addNewAxes(rect=None, frameon=True, patchcolor='none'):
"""Add new axes to plot, setting it as the active one.
Keyword arguments:
rect -- Axes area, None to copy from the last axes data.
frameon -- True to show frame, False otherwise.
patchcolor -- Patch color, 'none' for transparent plot.
"""
plt = getPlot()
if not plt:
return None
fig = plt.fig
if rect is None:
rect = plt.axes.get_position()
ax = fig.add_axes(rect, frameon=frameon)
ax.xaxis.set_ticks_position('bottom')
ax.spines['top'].set_color('none')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_color('none')
ax.patch.set_facecolor(patchcolor)
plt.axesList.append(ax)
plt.setActiveAxes(-1)
plt.update()
return ax
def save(path, figsize=None, dpi=None):
"""Save plot.
Keyword arguments:
path -- Destination file path.
figsize -- w,h figure size tuple in inches.
dpi -- Dots per inch.
"""
plt = getPlot()
if not plt:
return
# Backup figure options
fig = plt.fig
sizeBack = fig.get_size_inches()
dpiBack = fig.get_dpi()
# Save figure with new options
if figsize:
fig.set_size_inches(figsize[0], figsize[1])
if dpi:
fig.set_dpi(dpi)
plt.canvas.print_figure(path)
# Restore figure options
fig.set_size_inches(sizeBack[0], sizeBack[1])
fig.set_dpi(dpiBack)
plt.update()
class Line():
def __init__(self, axes, x, y, name):
"""Construct a new plot serie.
Keyword arguments:
axes -- Active axes
x -- X values
y -- Y values
        name -- Data series name (for legend).
"""
self.axes = axes
self.x = x
self.y = y
self.name = name
self.lid = len(axes.lines)
self.line, = axes.plot(x, y)
def setp(self, prop, value):
"""Change a line property value.
Keyword arguments:
prop -- Property name.
value -- New property value.
"""
plt.setp(self.line, prop, value)
def getp(self, prop):
"""Get line property value.
Keyword arguments:
prop -- Property name.
"""
return plt.getp(self.line, prop)
class Plot(PySide.QtGui.QWidget):
def __init__(self,
winTitle="plot",
parent=None,
flags=PySide.QtCore.Qt.WindowFlags(0)):
"""Construct a new plot widget.
Keyword arguments:
winTitle -- Tab title.
parent -- Widget parent.
flags -- QWidget flags
"""
PySide.QtGui.QWidget.__init__(self, parent, flags)
self.setWindowTitle(winTitle)
# Create matplotlib canvas
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
# Get axes
self.axes = self.fig.add_subplot(111)
self.axesList = [self.axes]
self.axes.xaxis.set_ticks_position('bottom')
self.axes.spines['top'].set_color('none')
self.axes.yaxis.set_ticks_position('left')
self.axes.spines['right'].set_color('none')
# Setup layout
vbox = PySide.QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Active series
self.series = []
# Indicators
self.skip = False
self.legend = False
self.legPos = (1.0, 1.0)
self.legSiz = 14
self.grid = False
def plot(self, x, y, name=None):
"""Plot a new line and return it.
Keyword arguments:
x -- X values
y -- Y values
        name -- Series name (for legend). """
l = Line(self.axes, x, y, name)
self.series.append(l)
# Update window
self.update()
return l
def update(self):
"""Update the plot, redrawing the canvas."""
if not self.skip:
self.skip = True
if self.legend:
legend(self.legend, self.legPos, self.legSiz)
self.canvas.draw()
self.skip = False
def isGrid(self):
"""Return True if Grid is active, False otherwise."""
return bool(self.grid)
def isLegend(self):
"""Return True if Legend is active, False otherwise."""
return bool(self.legend)
def setActiveAxes(self, index):
"""Change the current active axes.
Keyword arguments:
index -- Index of the new active axes set.
"""
self.axes = self.axesList[index]
self.fig.sca(self.axes)
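# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's illustration, not part of the original
# module): the helper below strings together the module-level API defined
# above. It assumes a running FreeCAD GUI session, because figure() needs a
# main window and MdiArea to host the plot tab; the function name
# ``_plot_module_demo`` and the file name used for save() are hypothetical.
def _plot_module_demo():
    """Create a plot tab, draw one series and save it (illustrative only)."""
    win = figure("demo")                       # new plot subwindow/tab
    if win is None:                            # no FreeCAD main window found
        return None
    plot([0, 1, 2, 3], [0, 1, 4, 9], "x^2")    # add a line series
    legend(True)                               # show the legend
    grid(True)                                 # show the grid
    title("demo plot")
    xlabel("x")
    ylabel("y")
    save("demo.png", figsize=(6.0, 4.0), dpi=100)
    return win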
|
lgpl-2.1
|
public-ink/public-ink
|
server/appengine/lib/matplotlib/widgets.py
|
4
|
73547
|
"""
GUI neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine a :class:`matplotlib.axes.Axes`
instance and pass that as the first arg. matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import six
from six.moves import zip
import numpy as np
from .mlab import dist
from .patches import Circle, Rectangle, Ellipse
from .lines import Line2D
from .transforms import blended_transform_factory
class LockDraw(object):
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use ``matplotlib.widgets.lock(someobj)`` to prevent
other widgets from drawing while you're interacting with the canvas.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
"""reserve the lock for *o*"""
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
"""release the lock"""
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
"""drawing is available to *o*"""
return not self.locked() or self.isowner(o)
def isowner(self, o):
"""Return True if *o* owns this lock"""
return self._owner is o
def locked(self):
"""Return True if the lock is currently held by an owner"""
return self._owner is not None
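# Hedged sketch (editor's illustration, not part of the original source): in
# practice this draw lock is reached through ``canvas.widgetlock``, which is
# a LockDraw instance on every figure canvas; the helper name
# ``_with_draw_lock`` is hypothetical.
def _with_draw_lock(canvas, owner, draw_func):
    """Run *draw_func* while *owner* holds the canvas draw lock."""
    if not canvas.widgetlock.available(owner):
        return False
    canvas.widgetlock(owner)              # reserve the lock for *owner*
    try:
        draw_func()                       # custom drawing while others are blocked
    finally:
        canvas.widgetlock.release(owner)
    return True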
class Widget(object):
"""
Abstract base class for GUI neutral widgets
"""
drawon = True
eventson = True
_active = True
def set_active(self, active):
"""Set whether the widget is active.
"""
self._active = active
def get_active(self):
"""Get whether the widget is active.
"""
return self._active
# set_active is overriden by SelectorWidgets.
active = property(get_active, lambda self, active: self.set_active(active),
doc="Is the widget active?")
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
class AxesWidget(Widget):
"""Widget that is connected to a single
:class:`~matplotlib.axes.Axes`.
To guarantee that the widget remains responsive and not garbage-collected,
a reference to the object should be maintained by the user.
This is necessary because the callback registry
maintains only weak-refs to the functions, which are member
functions of the widget. If there are no references to the widget
object it may be garbage collected which will disconnect the
callbacks.
Attributes:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget
*canvas* : :class:`~matplotlib.backend_bases.FigureCanvasBase` subclass
The parent figure canvas for the widget.
*active* : bool
If False, the widget does not respond to events.
"""
def __init__(self, ax):
self.ax = ax
self.canvas = ax.figure.canvas
self.cids = []
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores callback ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
class Button(AxesWidget):
"""
A GUI neutral button.
For the button to remain responsive you must keep a reference to it.
The following attributes are accessible
*ax*
The :class:`matplotlib.axes.Axes` the button renders into.
*label*
A :class:`matplotlib.text.Text` instance.
*color*
The color of the button when not hovering.
*hovercolor*
The color of the button when hovering.
Call :meth:`on_clicked` to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`matplotlib.axes.Axes` instance the button
will be placed into.
label : str
The button text. Accepts string.
image : array, mpl image, Pillow Image
The image to place in the button, if not *None*.
Can be any legal arg to imshow (numpy array,
matplotlib Image instance, or Pillow Image).
color : color
The color of the button when not activated
hovercolor : color
The color of the button when the mouse is over it
"""
AxesWidget.__init__(self, ax)
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_facecolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if self.ignore(event):
return
if event.inaxes != self.ax:
return
if not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
def _release(self, event):
if self.ignore(event):
return
if event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
if not self.eventson:
return
if event.inaxes != self.ax:
return
for cid, func in six.iteritems(self.observers):
func(event)
def _motion(self, event):
if self.ignore(event):
return
if event.inaxes == self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_facecolor(c)
self._lastcolor = c
if self.drawon:
self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this *func* with event.
A connection id is returned. It can be used to disconnect
the button from its callback.
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
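# Hedged usage sketch (editor's illustration, not shipped with the module):
# wiring a Button to a callback via on_clicked(), as the class docstring
# describes. It assumes an interactive backend; ``_button_demo`` and the
# axes layout values are arbitrary.
def _button_demo():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    line, = ax.plot([0, 1, 2], [0, 1, 4])
    bax = fig.add_axes([0.81, 0.02, 0.15, 0.07])   # small axes for the button
    button = Button(bax, 'Hide line')
    def on_click(event):
        line.set_visible(not line.get_visible())   # toggle the plotted line
        fig.canvas.draw_idle()
    button.on_clicked(on_click)
    plt.show()
    return button                                  # keep a reference alive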
class Slider(AxesWidget):
"""
A slider representing a floating point range.
For the slider to remain responsive you must maintain a
reference to it.
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*.
Additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` that draws the slider
knob. See the :class:`matplotlib.patches.Rectangle` documentation for
valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...).
Parameters
----------
ax : Axes
The Axes to put the slider in
label : str
Slider label
valmin : float
The minimum value of the slider
valmax : float
The maximum value of the slider
valinit : float
The slider initial position
label : str
The slider label
valfmt : str
            Used to format the slider value, printf-style format string
closedmin : bool
Indicate whether the slider interval is closed on the bottom
closedmax : bool
Indicate whether the slider interval is closed on the top
slidermin : Slider or None
Do not allow the current slider to have a value less than
`slidermin`
slidermax : Slider or None
Do not allow the current slider to have a value greater than
`slidermax`
dragging : bool
if the slider can be dragged by the mouse
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axvline(valinit, 0, 1, color='r', lw=1)
self.valfmt = valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt % valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
"""update the slider position"""
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.xdata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % val)
if self.drawon:
self.ax.figure.canvas.draw_idle()
self.val = val
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
def reset(self):
"""reset the slider to the initial value if needed"""
if (self.val != self.valinit):
self.set_val(self.valinit)
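# Hedged usage sketch (editor's illustration, not shipped with the module):
# a Slider driving a plotted line through on_changed(), following the
# contract documented above. It assumes an interactive backend; names such
# as ``_slider_demo`` are made up.
def _slider_demo():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)                  # leave room for the slider
    t = np.linspace(0.0, 1.0, 200)
    line, = ax.plot(t, np.sin(2 * np.pi * t))
    sax = fig.add_axes([0.15, 0.05, 0.7, 0.05])
    freq = Slider(sax, 'freq', 0.5, 5.0, valinit=1.0, valfmt='%1.1f')
    def on_change(val):
        line.set_ydata(np.sin(2 * np.pi * val * t))  # redraw with new frequency
        fig.canvas.draw_idle()
    freq.on_changed(on_change)
    plt.show()
    return freq                                      # keep a reference alive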
class CheckButtons(AxesWidget):
"""
    A GUI neutral set of check buttons.
For the check buttons to remain responsive you must keep a
reference to this object.
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are
located in
*labels*
List of :class:`matplotlib.text.Text` instances
*lines*
List of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have ``set_visible(False)``
when its box is not checked.
*rectangles*
List of :class:`matplotlib.patches.Rectangle` instances
Connect to the CheckButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*actives*
A len(buttons) list of booleans indicating whether
the button is active
"""
AxesWidget.__init__(self, ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels) > 1:
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_facecolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color': 'k', 'linewidth': 1.25,
'transform': ax.transAxes, 'solid_capstyle': 'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy / 2., dy / 2.
x, y = 0.05, y - h / 2.
p = Rectangle(xy=(x, y), width=w, height=h, edgecolor='black',
facecolor=axcolor, transform=ax.transAxes)
l1 = Line2D([x, x + w], [y + h, y], **lineparams)
l2 = Line2D([x, x + w], [y, y + h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1, l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
for i, (p, t) in enumerate(zip(self.rectangles, self.labels)):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y)):
self.set_active(i)
break
else:
return
def set_active(self, index):
"""
Directly (de)activate a check button by index.
*index* is an index into the original label list
that this object was constructed with.
Raises ValueError if *index* is invalid.
Callbacks will be triggered if :attr:`eventson` is True.
"""
        if not 0 <= index < len(self.labels):
raise ValueError("Invalid CheckButton index: %d" % index)
l1, l2 = self.lines[index]
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(self.labels[index].get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
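# Hedged usage sketch (editor's illustration, not shipped with the module):
# toggling line visibility from CheckButtons, as the on_clicked() docstring
# above suggests. Backend and axes layout values are illustrative assumptions.
def _check_buttons_demo():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.3)
    t = np.linspace(0.0, 1.0, 200)
    lines = {'sin': ax.plot(t, np.sin(2 * np.pi * t), label='sin')[0],
             'cos': ax.plot(t, np.cos(2 * np.pi * t), label='cos')[0]}
    cax = fig.add_axes([0.02, 0.4, 0.2, 0.2])
    checks = CheckButtons(cax, ['sin', 'cos'], [True, True])
    def on_click(label):
        ln = lines[label]
        ln.set_visible(not ln.get_visible())       # flip the matching line
        fig.canvas.draw_idle()
    checks.on_clicked(on_click)
    plt.show()
    return checks                                  # keep a reference alive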
class RadioButtons(AxesWidget):
"""
A GUI neutral radio button.
For the buttons to remain responsive
you must keep a reference to this object.
The following attributes are exposed:
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are in
*activecolor*
The color of the button when clicked
*labels*
A list of :class:`matplotlib.text.Text` instances
*circles*
A list of :class:`matplotlib.patches.Circle` instances
*value_selected*
A string listing the current value selected
Connect to the RadioButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*active*
The index into labels for the button that is active
*activecolor*
The color of the button when clicked
"""
AxesWidget.__init__(self, ax)
self.activecolor = activecolor
self.value_selected = None
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
cnt = 0
axcolor = ax.get_facecolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt == active:
self.value_selected = label
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, edgecolor='black',
facecolor=facecolor, transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for i, (p, t) in enumerate(zip(self.circles, self.labels)):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
self.set_active(i)
break
else:
return
def set_active(self, index):
"""
Trigger which radio button to make active.
*index* is an index into the original label list
that this object was constructed with.
Raise ValueError if the index is invalid.
Callbacks will be triggered if :attr:`eventson` is True.
"""
        if not 0 <= index < len(self.labels):
raise ValueError("Invalid RadioButton index: %d" % index)
self.value_selected = self.labels[index].get_text()
for i, p in enumerate(self.circles):
if i == index:
color = self.activecolor
else:
color = self.ax.get_facecolor()
p.set_facecolor(color)
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(self.labels[index].get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
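# Hedged usage sketch (editor's illustration, not shipped with the module):
# RadioButtons selecting one of several line colors; the callback receives
# the clicked label text, as documented above. Layout values are arbitrary.
def _radio_buttons_demo():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.3)
    line, = ax.plot([0, 1, 2, 3], [0, 1, 4, 9], color='red')
    rax = fig.add_axes([0.02, 0.4, 0.2, 0.25])
    radio = RadioButtons(rax, ['red', 'green', 'blue'], active=0)
    def on_click(label):
        line.set_color(label)                      # label doubles as a color name
        fig.canvas.draw_idle()
    radio.on_clicked(on_click)
    plt.show()
    return radio                                   # keep a reference alive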
class SubplotTool(Widget):
"""
A tool to adjust the subplot params of a :class:`matplotlib.figure.Figure`.
"""
def __init__(self, targetfig, toolfig):
"""
*targetfig*
The figure instance to adjust.
*toolfig*
The figure instance to embed the subplot tool into. If
*None*, a default figure will be created. If you are using
this from the GUI
"""
# FIXME: The docstring seems to just abruptly end without...
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s' % (self.slider.label.get_text(),
self.slider.valfmt)
return fmt % x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left',
0, 1, targetfig.subplotpars.left,
closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom,
'bottom', 0, 1,
targetfig.subplotpars.bottom,
closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1,
targetfig.subplotpars.right,
closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1,
targetfig.subplotpars.top,
closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace',
0, 1, targetfig.subplotpars.wspace,
closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace',
0, 1, targetfig.subplotpars.hspace,
closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace,)
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon:
self.targetfig.canvas.draw()
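# Hedged usage sketch (editor's illustration, not shipped with the module):
# embedding a SubplotTool in a second figure to adjust the first, which is
# essentially what ``pyplot.subplot_tool`` does. Figure sizes are arbitrary.
def _subplot_tool_demo():
    import matplotlib.pyplot as plt
    target = plt.figure(figsize=(6, 4))
    target.add_subplot(111).plot([0, 1, 2], [0, 1, 4])
    toolfig = plt.figure(figsize=(6, 3))           # host figure for the sliders
    tool = SubplotTool(target, toolfig)
    plt.show()
    return tool                                    # keep a reference alive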
class Cursor(AxesWidget):
"""
A horizontal and vertical line that spans the axes and moves with
the pointer. You can turn off the hline or vline respectively with
the following attributes:
*horizOn*
Controls the visibility of the horizontal line
*vertOn*
        Controls the visibility of the vertical line
and the visibility of the cursor itself with the *visible* attribute.
For the cursor to remain responsive you must keep a reference to
it.
"""
def __init__(self, ax, horizOn=True, vertOn=True, useblit=False,
**lineprops):
"""
Add a cursor to *ax*. If ``useblit=True``, use the backend-
dependent blitting features for faster updates (GTKAgg
only for now). *lineprops* is a dictionary of line properties.
.. plot :: mpl_examples/widgets/cursor.py
"""
# TODO: Is the GTKAgg limitation still true?
AxesWidget.__init__(self, ax)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.clear)
self.visible = True
self.horizOn = horizOn
self.vertOn = vertOn
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
lineprops['animated'] = True
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
"""on mouse motion draw the cursor if visible"""
if self.ignore(event):
return
if not self.canvas.widgetlock.available(self):
return
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible:
return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
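# Hedged usage sketch (editor's illustration, not shipped with the module):
# attaching a crosshair Cursor to an axes, optionally blitted. The extra
# keyword arguments are ordinary Line2D properties passed through *lineprops*.
def _cursor_demo():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    t = np.linspace(0.0, 1.0, 200)
    ax.plot(t, np.sin(2 * np.pi * t))
    cursor = Cursor(ax, horizOn=True, vertOn=True, useblit=True,
                    color='red', linewidth=1)
    plt.show()
    return cursor                                  # keep a reference alive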
class MultiCursor(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes.
For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
show()
"""
def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
xmin, xmax = axes[-1].get_xlim()
ymin, ymax = axes[-1].get_ylim()
xmid = 0.5 * (xmin + xmax)
ymid = 0.5 * (ymin + ymax)
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if self.useblit:
lineprops['animated'] = True
if vertOn:
self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
else:
self.vlines = []
if horizOn:
self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
for ax in axes]
else:
self.hlines = []
self.connect()
def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw)
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if self.ignore(event):
return
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
if self.horizOn:
for line in self.hlines:
line.set_ydata((event.ydata, event.ydata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class _SelectorWidget(AxesWidget):
def __init__(self, ax, onselect, useblit=False, button=None,
state_modifier_keys=None):
AxesWidget.__init__(self, ax)
self.visible = True
self.onselect = onselect
self.useblit = useblit and self.canvas.supports_blit
self.connect_default_events()
self.state_modifier_keys = dict(move=' ', clear='escape',
square='shift', center='control')
self.state_modifier_keys.update(state_modifier_keys or {})
self.background = None
self.artists = []
if isinstance(button, int):
self.validButtons = [button]
else:
self.validButtons = button
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
self._prev_event = None
self.state = set()
def set_active(self, active):
AxesWidget.set_active(self, active)
if active:
self.update_background(None)
def update_background(self, event):
"""force an update of the background"""
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def connect_default_events(self):
"""Connect the major canvas events to methods."""
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.connect_event('key_press_event', self.on_key_press)
self.connect_event('key_release_event', self.on_key_release)
self.connect_event('scroll_event', self.on_scroll)
def ignore(self, event):
"""return *True* if *event* should be ignored"""
if not self.active or not self.ax.get_visible():
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
if not hasattr(event, 'button'):
event.button = None
# Only do rectangle selection if event was triggered
# with a desired button
if self.validButtons is not None:
if event.button not in self.validButtons:
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes != self.ax
# If a button was pressed, check if the release-button is the
# same.
if event.button == self.eventpress.button:
return False
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes != self.ax or
event.button != self.eventpress.button)
def update(self):
"""draw using newfangled blit or oldfangled draw depending on
useblit
"""
if not self.ax.get_visible():
return False
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for artist in self.artists:
self.ax.draw_artist(artist)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def _get_data(self, event):
"""Get the xdata and ydata for event, with limits"""
if event.xdata is None:
return None, None
x0, x1 = self.ax.get_xbound()
y0, y1 = self.ax.get_ybound()
xdata = max(x0, event.xdata)
xdata = min(x1, xdata)
ydata = max(y0, event.ydata)
ydata = min(y1, ydata)
return xdata, ydata
def _clean_event(self, event):
"""Clean up an event
Use prev event if there is no xdata
Limit the xdata and ydata to the axes limits
Set the prev event
"""
if event.xdata is None:
event = self._prev_event
else:
event = copy.copy(event)
event.xdata, event.ydata = self._get_data(event)
self._prev_event = event
return event
def press(self, event):
"""Button press handler and validator"""
if not self.ignore(event):
event = self._clean_event(event)
self.eventpress = event
self._prev_event = event
key = event.key or ''
key = key.replace('ctrl', 'control')
# move state is locked in on a button press
if key == self.state_modifier_keys['move']:
self.state.add('move')
self._press(event)
return True
return False
def _press(self, event):
"""Button press handler"""
pass
def release(self, event):
"""Button release event handler and validator"""
if not self.ignore(event) and self.eventpress:
event = self._clean_event(event)
self.eventrelease = event
self._release(event)
self.eventpress = None
self.eventrelease = None
self.state.discard('move')
return True
return False
def _release(self, event):
"""Button release event handler"""
pass
def onmove(self, event):
"""Cursor move event handler and validator"""
if not self.ignore(event) and self.eventpress:
event = self._clean_event(event)
self._onmove(event)
return True
return False
def _onmove(self, event):
"""Cursor move event handler"""
pass
def on_scroll(self, event):
"""Mouse scroll event handler and validator"""
if not self.ignore(event):
self._on_scroll(event)
def _on_scroll(self, event):
"""Mouse scroll event handler"""
pass
def on_key_press(self, event):
"""Key press event handler and validator for all selection widgets"""
if self.active:
key = event.key or ''
key = key.replace('ctrl', 'control')
if key == self.state_modifier_keys['clear']:
for artist in self.artists:
artist.set_visible(False)
self.update()
return
for (state, modifier) in self.state_modifier_keys.items():
if modifier in key:
self.state.add(state)
self._on_key_press(event)
def _on_key_press(self, event):
"""Key press event handler - use for widget-specific key press actions.
"""
pass
def on_key_release(self, event):
"""Key release event handler and validator"""
if self.active:
key = event.key or ''
for (state, modifier) in self.state_modifier_keys.items():
if modifier in key:
self.state.discard(state)
self._on_key_release(event)
def _on_key_release(self, event):
"""Key release event handler"""
pass
def set_visible(self, visible):
""" Set the visibility of our artists """
self.visible = visible
for artist in self.artists:
artist.set_visible(visible)
class SpanSelector(_SelectorWidget):
"""
Visually select a min/max range on a single axis and call a function with
those values.
To guarantee that the selector remains responsive, keep a reference to
it.
In order to turn off the SpanSelector, set `span_selector.active=False`. To
turn it back on, set `span_selector.active=True`.
Parameters
----------
ax : :class:`matplotlib.axes.Axes` object
onselect : func(min, max), min/max are floats
direction : "horizontal" or "vertical"
The axis along which to draw the span selector
minspan : float, default is None
If selection is less than *minspan*, do not call *onselect*
useblit : bool, default is False
If True, use the backend-dependent blitting features for faster
canvas updates. Only available for GTKAgg right now.
rectprops : dict, default is None
Dictionary of :class:`matplotlib.patches.Patch` properties
onmove_callback : func(min, max), min/max are floats, default is None
Called on mouse move while the span is being selected
span_stays : bool, default is False
If True, the span stays visible after the mouse is released
button : int or list of ints
Determines which mouse buttons activate the span selector
1 = left mouse button\n
2 = center mouse button (scroll wheel)\n
3 = right mouse button\n
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.widgets as mwidgets
>>> fig, ax = plt.subplots()
>>> ax.plot([1, 2, 3], [10, 50, 100])
>>> def onselect(vmin, vmax):
print(vmin, vmax)
>>> rectprops = dict(facecolor='blue', alpha=0.5)
>>> span = mwidgets.SpanSelector(ax, onselect, 'horizontal',
rectprops=rectprops)
>>> fig.show()
See also: :ref:`widgets-span_selector`
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
rectprops=None, onmove_callback=None, span_stays=False,
button=None):
_SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
button=button)
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
rectprops['animated'] = self.useblit
if direction not in ['horizontal', 'vertical']:
msg = "direction must be in [ 'horizontal' | 'vertical' ]"
raise ValueError(msg)
self.direction = direction
self.rect = None
self.pressv = None
self.rectprops = rectprops
self.onmove_callback = onmove_callback
self.minspan = minspan
self.span_stays = span_stays
# Needed when dragging out of axes
self.prev = (0, 0)
# Reset canvas so that `new_axes` connects events.
self.canvas = None
self.new_axes(ax)
def new_axes(self, ax):
"""Set SpanSelector to operate on a new Axes"""
self.ax = ax
if self.canvas is not ax.figure.canvas:
if self.canvas is not None:
self.disconnect_events()
self.canvas = ax.figure.canvas
self.connect_default_events()
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData,
self.ax.transAxes)
w, h = 0, 1
else:
trans = blended_transform_factory(self.ax.transAxes,
self.ax.transData)
w, h = 1, 0
self.rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
if self.span_stays:
self.stay_rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
self.stay_rect.set_animated(False)
self.ax.add_patch(self.stay_rect)
self.ax.add_patch(self.rect)
self.artists = [self.rect]
def ignore(self, event):
"""return *True* if *event* should be ignored"""
return _SelectorWidget.ignore(self, event) or not self.visible
def _press(self, event):
"""on button press event"""
self.rect.set_visible(self.visible)
if self.span_stays:
self.stay_rect.set_visible(False)
# really force a draw so that the stay rect is not in
# the blit background
if self.useblit:
self.canvas.draw()
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
self.pressv = xdata
else:
self.pressv = ydata
return False
def _release(self, event):
"""on button release event"""
if self.pressv is None:
return
self.buttonDown = False
self.rect.set_visible(False)
if self.span_stays:
self.stay_rect.set_x(self.rect.get_x())
self.stay_rect.set_y(self.rect.get_y())
self.stay_rect.set_width(self.rect.get_width())
self.stay_rect.set_height(self.rect.get_height())
self.stay_rect.set_visible(True)
self.canvas.draw_idle()
vmin = self.pressv
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
vmax = xdata or self.prev[0]
else:
vmax = ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span < self.minspan:
return
self.onselect(vmin, vmax)
self.pressv = None
return False
def _onmove(self, event):
"""on motion notify event"""
if self.pressv is None:
return
x, y = self._get_data(event)
if x is None:
return
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv > maxv:
minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv - minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv - minv)
if self.onmove_callback is not None:
vmin = self.pressv
xdata, ydata = self._get_data(event)
if self.direction == 'horizontal':
vmax = xdata or self.prev[0]
else:
vmax = ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None, useblit=True):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False, label='_nolegend_')
props.update(marker_props if marker_props is not None else {})
self._markers = Line2D(x, y, animated=useblit, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
if diff.ndim == 2:
dist = np.sqrt(np.sum(diff ** 2, axis=1))
return np.argmin(dist), np.min(dist)
else:
return 0, np.sqrt(np.sum(diff ** 2))
class RectangleSelector(_SelectorWidget):
"""
Select a rectangular region of an axes.
For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print(' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata))
print(' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print(' used button : ', eclick.button)
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
_shape_klass = Rectangle
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data',
button=None, maxdist=10, marker_props=None,
interactive=False, state_modifier_keys=None):
"""
Create a selector in *ax*. When a selection is made, clear
the span and call onselect with::
onselect(pos_1, pos_2)
and clear the drawn box/line. The ``pos_1`` and ``pos_2`` are
arrays of length 2 containing the x- and y-coordinate.
If *minspanx* is not *None* then events smaller than *minspanx*
in x direction are ignored (it's the same for y).
The rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.2, fill=True)
The line is drawn with *lineprops*; default::
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use *drawtype* if you want the mouse to draw a line,
a box or nothing between click and actual position by setting
``drawtype = 'line'``, ``drawtype='box'`` or ``drawtype = 'none'``.
*spancoords* is one of 'data' or 'pixels'. If 'data', *minspanx*
        and *minspany* will be interpreted in the same coordinates as
the x and y axis. If 'pixels', they are in pixels.
*button* is a list of integers indicating which mouse buttons should
be used for rectangle selection. You can also specify a single
integer if only a single button is desired. Default is *None*,
which does not limit which button can be used.
Note, typically:
1 = left mouse button
2 = center mouse button (scroll wheel)
3 = right mouse button
        *interactive* will draw a set of handles and allow you to interact
with the widget after it is drawn.
*state_modifier_keys* are keyboard modifiers that affect the behavior
of the widget.
The defaults are:
dict(move=' ', clear='escape', square='shift', center='ctrl')
Keyboard modifiers, which:
'move': Move the existing shape.
'clear': Clear the current shape.
'square': Makes the shape square.
'center': Make the initial point the center of the shape.
'square' and 'center' can be combined.
"""
_SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
button=button,
state_modifier_keys=state_modifier_keys)
self.to_draw = None
self.visible = True
self.interactive = interactive
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='red', edgecolor='black',
alpha=0.2, fill=True)
rectprops['animated'] = self.useblit
self.rectprops = rectprops
self.to_draw = self._shape_klass((0, 0), 0, 1, visible=False,
**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth=2, alpha=0.5)
lineprops['animated'] = self.useblit
self.lineprops = lineprops
self.to_draw = Line2D([0, 0], [0, 0], visible=False,
**self.lineprops)
self.ax.add_line(self.to_draw)
self.minspanx = minspanx
self.minspany = minspany
if spancoords not in ('data', 'pixels'):
msg = "'spancoords' must be one of [ 'data' | 'pixels' ]"
raise ValueError(msg)
self.spancoords = spancoords
self.drawtype = drawtype
self.maxdist = maxdist
if rectprops is None:
props = dict(mec='r')
else:
props = dict(mec=rectprops.get('edgecolor', 'r'))
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props,
useblit=self.useblit)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=props,
useblit=self.useblit)
xc, yc = self.center
self._center_handle = ToolHandles(self.ax, [xc], [yc], marker='s',
marker_props=props,
useblit=self.useblit)
self.active_handle = None
self.artists = [self.to_draw, self._center_handle.artist,
self._corner_handles.artist,
self._edge_handles.artist]
if not self.interactive:
self.artists = [self.to_draw]
self._extents_on_press = None
def _press(self, event):
"""on button press event"""
        # make the drawn box/line visible and get the click coordinates,
# button, ...
if self.interactive and self.to_draw.get_visible():
self._set_active_handle(event)
else:
self.active_handle = None
if self.active_handle is None or not self.interactive:
# Clear previous rectangle before drawing new rectangle.
self.update()
self.set_visible(self.visible)
def _release(self, event):
"""on button release event"""
if not self.interactive:
self.to_draw.set_visible(False)
# update the eventpress and eventrelease with the resulting extents
x1, x2, y1, y2 = self.extents
self.eventpress.xdata = x1
self.eventpress.ydata = y1
xy1 = self.ax.transData.transform_point([x1, y1])
self.eventpress.x, self.eventpress.y = xy1
self.eventrelease.xdata = x2
self.eventrelease.ydata = y2
xy2 = self.ax.transData.transform_point([x2, y2])
self.eventrelease.x, self.eventrelease.y = xy2
if self.spancoords == 'data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords == 'pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx < self.minspanx
yproblems = self.minspany is not None and spany < self.minspany
# check if drawn distance (if it exists) is not too small in
# either x or y-direction
if self.drawtype != 'none' and (xproblems or yproblems):
for artist in self.artists:
artist.set_visible(False)
self.update()
return
# call desired function
self.onselect(self.eventpress, self.eventrelease)
self.update()
return False
def _onmove(self, event):
"""on motion notify event if box/line is wanted"""
# resize an existing shape
if self.active_handle and not self.active_handle == 'C':
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
# move existing shape
elif (('move' in self.state or self.active_handle == 'C')
and self._extents_on_press is not None):
x1, x2, y1, y2 = self._extents_on_press
dx = event.xdata - self.eventpress.xdata
dy = event.ydata - self.eventpress.ydata
x1 += dx
x2 += dx
y1 += dy
y2 += dy
# new shape
else:
center = [self.eventpress.xdata, self.eventpress.ydata]
center_pix = [self.eventpress.x, self.eventpress.y]
dx = (event.xdata - center[0]) / 2.
dy = (event.ydata - center[1]) / 2.
# square shape
if 'square' in self.state:
dx_pix = abs(event.x - center_pix[0])
dy_pix = abs(event.y - center_pix[1])
if not dx_pix:
return
maxd = max(abs(dx_pix), abs(dy_pix))
if abs(dx_pix) < maxd:
dx *= maxd / (abs(dx_pix) + 1e-6)
if abs(dy_pix) < maxd:
dy *= maxd / (abs(dy_pix) + 1e-6)
# from center
if 'center' in self.state:
dx *= 2
dy *= 2
# from corner
else:
center[0] += dx
center[1] += dy
x1, x2, y1, y2 = (center[0] - dx, center[0] + dx,
center[1] - dy, center[1] + dy)
self.extents = x1, x2, y1, y2
@property
def _rect_bbox(self):
if self.drawtype == 'box':
x0 = self.to_draw.get_x()
y0 = self.to_draw.get_y()
width = self.to_draw.get_width()
height = self.to_draw.get_height()
return x0, y0, width, height
else:
x, y = self.to_draw.get_data()
x0, x1 = min(x), max(x)
y0, y1 = min(y), max(y)
return x0, y0, x1 - x0, y1 - y0
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def center(self):
"""Center of rectangle"""
x0, y0, width, height = self._rect_bbox
return x0 + width / 2., y0 + height / 2.
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
# Update displayed shape
self.draw_shape(extents)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self._center_handle.set_data(*self.center)
self.set_visible(self.visible)
self.update()
def draw_shape(self, extents):
x0, x1, y0, y1 = extents
xmin, xmax = sorted([x0, x1])
ymin, ymax = sorted([y0, y1])
xlim = sorted(self.ax.get_xlim())
ylim = sorted(self.ax.get_ylim())
xmin = max(xlim[0], xmin)
ymin = max(ylim[0], ymin)
xmax = min(xmax, xlim[1])
ymax = min(ymax, ylim[1])
if self.drawtype == 'box':
self.to_draw.set_x(xmin)
self.to_draw.set_y(ymin)
self.to_draw.set_width(xmax - xmin)
self.to_draw.set_height(ymax - ymin)
elif self.drawtype == 'line':
self.to_draw.set_data([xmin, xmax], [ymin, ymax])
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event"""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
m_idx, m_dist = self._center_handle.closest(event.x, event.y)
if 'move' in self.state:
self.active_handle = 'C'
self._extents_on_press = self.extents
# Set active handle as closest handle, if mouse click is close enough.
elif m_dist < self.maxdist * 2:
self.active_handle = 'C'
elif c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
@property
def geometry(self):
if hasattr(self.to_draw, 'get_verts'):
xfm = self.ax.transData.inverted()
y, x = xfm.transform(self.to_draw.get_verts()).T
return np.array([x, y])
else:
return np.array(self.to_draw.get_data())
class EllipseSelector(RectangleSelector):
"""
Select an elliptical region of an axes.
For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import EllipseSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print(' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata))
print(' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata))
print(' used button : ', eclick.button)
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.ES.active:
print(' EllipseSelector deactivated.')
            toggle_selector.ES.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.ES.active:
print(' EllipseSelector activated.')
toggle_selector.ES.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.ES = EllipseSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
_shape_klass = Ellipse
def draw_shape(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
center = [x1 + (x2 - x1) / 2., y1 + (y2 - y1) / 2.]
a = (xmax - xmin) / 2.
b = (ymax - ymin) / 2.
if self.drawtype == 'box':
self.to_draw.center = center
self.to_draw.width = 2 * a
self.to_draw.height = 2 * b
else:
rad = np.arange(31) * 12 * np.pi / 180
x = a * np.cos(rad) + center[0]
y = b * np.sin(rad) + center[1]
self.to_draw.set_data(x, y)
@property
def _rect_bbox(self):
if self.drawtype == 'box':
x, y = self.to_draw.center
width = self.to_draw.width
height = self.to_draw.height
return x - width / 2., y - height / 2., width, height
else:
x, y = self.to_draw.get_data()
x0, x1 = min(x), max(x)
y0, y1 = min(y), max(y)
return x0, y0, x1 - x0, y1 - y0
class LassoSelector(_SelectorWidget):
"""Selection curve of an arbitrary shape.
For the selector to remain responsive you must keep a reference to
it.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select
data points from an image.
In contrast to :class:`Lasso`, `LassoSelector` is written with an interface
similar to :class:`RectangleSelector` and :class:`SpanSelector` and will
continue to interact with the axes until disconnected.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*onselect* : function
Whenever the lasso is released, the `onselect` function is called and
passed the vertices of the selected path.
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(verts):
print(verts)
lasso = LassoSelector(ax, onselect)
*button* is a list of integers indicating which mouse buttons should
be used for rectangle selection. You can also specify a single
integer if only a single button is desired. Default is *None*,
which does not limit which button can be used.
Note, typically:
1 = left mouse button
2 = center mouse button (scroll wheel)
3 = right mouse button
"""
def __init__(self, ax, onselect=None, useblit=True, lineprops=None,
button=None):
_SelectorWidget.__init__(self, ax, onselect, useblit=useblit,
button=button)
self.verts = None
if lineprops is None:
lineprops = dict()
if useblit:
lineprops['animated'] = True
self.line = Line2D([], [], **lineprops)
self.line.set_visible(False)
self.ax.add_line(self.line)
self.artists = [self.line]
def onpress(self, event):
self.press(event)
def _press(self, event):
self.verts = [self._get_data(event)]
self.line.set_visible(True)
def onrelease(self, event):
self.release(event)
def _release(self, event):
if self.verts is not None:
self.verts.append(self._get_data(event))
self.onselect(self.verts)
self.line.set_data([[], []])
self.line.set_visible(False)
self.verts = None
def _onmove(self, event):
if self.verts is None:
return
self.verts.append(self._get_data(event))
self.line.set_data(list(zip(*self.verts)))
self.update()
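# A minimal sketch of the Path.contains_point(s) pairing described in the
# docstring above; the scatter data and variable names here are hypothetical:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib.path import Path
#
#     xy = np.random.rand(100, 2)
#     fig, ax = plt.subplots()
#     ax.scatter(xy[:, 0], xy[:, 1])
#
#     def onselect(verts):
#         inside = Path(verts).contains_points(xy)
#         print('%d points inside the lasso' % inside.sum())
#
#     selector = LassoSelector(ax, onselect)   # keep this reference alive
#     plt.show()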
class Lasso(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select data points
from an image.
Unlike :class:`LassoSelector`, this must be initialized with a starting
point `xy`, and the `Lasso` events are destroyed upon release.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*xy* : array
Coordinates of the start of the lasso.
*callback* : function
Whenever the lasso is released, the `callback` function is called and
passed the vertices of the selected path.
"""
def __init__(self, ax, xy, callback=None, useblit=True):
AxesWidget.__init__(self, ax)
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
x, y = xy
self.verts = [(x, y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.ax.add_line(self.line)
self.callback = callback
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts) > 2:
self.callback(self.verts)
self.ax.lines.remove(self.line)
self.verts = None
self.disconnect_events()
def onmove(self, event):
if self.ignore(event):
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(list(zip(*self.verts)))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
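# A minimal sketch of driving Lasso from a button-press handler, per the
# docstring above (the figure/axes names are hypothetical); the reference is
# stored on the handler so the widget is not garbage collected mid-drag:
#
#     def onselect(verts):
#         print('lasso closed with %d vertices' % len(verts))
#
#     def onpress(event):
#         if event.inaxes is None:
#             return
#         onpress.lasso = Lasso(event.inaxes, (event.xdata, event.ydata),
#                               onselect)
#
#     fig.canvas.mpl_connect('button_press_event', onpress)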
|
gpl-3.0
|
chubbymaggie/datasketch
|
benchmark/lsh_benchmark_plot.py
|
2
|
2782
|
import json, sys, argparse
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def get_precision_recall(found, reference):
reference = set(reference)
intersect = sum(1 for i in found if i in reference)
if len(found) == 0:
precision = 0.0
else:
precision = float(intersect) / float(len(found))
if len(reference) == 0:
recall = 1.0
else:
recall = float(intersect) / float(len(reference))
if len(found) == len(reference) == 0:
precision = 1.0
recall = 1.0
return [precision, recall]
def fscore(precision, recall):
if precision == 0.0 and recall == 0.0:
return 0.0
return 2.0 / (1.0 / precision + 1.0 / recall)
def average_fscore(founds, references):
return np.mean([fscore(*get_precision_recall(found, reference))
for found, reference in zip(founds, references)])
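# Worked example for the helpers above (hypothetical ids): with
# found = [1, 2, 3] and reference = [2, 3, 4] the intersection has 2 items,
# so precision = recall = 2/3 and
# fscore(2/3, 2/3) = 2.0 / (3/2 + 3/2) = 2/3.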
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("benchmark_output")
args = parser.parse_args(sys.argv[1:])
with open(args.benchmark_output) as f:
benchmark = json.load(f)
num_perms = benchmark["num_perms"]
lsh_times = benchmark["lsh_times"]
linearscan_times = benchmark["linearscan_times"]
ground_truth_results = [[x[0] for x in r] for r in benchmark["ground_truth_results"]]
lsh_fscores = []
for results in benchmark["lsh_results"]:
query_results = [[x[0] for x in r] for r in results]
lsh_fscores.append(average_fscore(query_results, ground_truth_results))
linearscan_fscores = []
for results in benchmark["linearscan_results"]:
query_results = [[x[0] for x in r] for r in results]
linearscan_fscores.append(average_fscore(query_results, ground_truth_results))
lsh_times = np.array([np.percentile(ts, 90)
for ts in lsh_times])*1000
linearscan_times = np.array([np.percentile(ts, 90)
for ts in linearscan_times])*1000
fig, axes = plt.subplots(1, 2, figsize=(5*2, 4.5), sharex=True)
# Plot query fscore vs. num perm
axes[0].plot(num_perms, linearscan_fscores, marker="+", label="Linearscan")
axes[0].plot(num_perms, lsh_fscores, marker="+", label="LSH")
axes[0].set_ylabel("Average F-Score")
axes[0].set_xlabel("# of Permmutation Functions")
axes[0].grid()
# Plot query time vs. num perm
axes[1].plot(num_perms, linearscan_times, marker="+", label="Linearscan")
axes[1].plot(num_perms, lsh_times, marker="+", label="LSH")
axes[1].set_xlabel("# of Permutation Functions")
axes[1].set_ylabel("90 Percentile Query Time (ms)")
axes[1].grid()
axes[1].legend(loc="center right")
fig.savefig("lsh_benchmark.png", pad_inches=0.05, bbox_inches="tight")
|
mit
|
liberatorqjw/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
20
|
1207
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
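# A small follow-up sketch: read a single setting off the curve above by
# taking the C with the highest mean CV score.
best_idx = int(np.argmax(scores))
print("Best C: %g (CV score %.3f +/- %.3f)"
      % (C_s[best_idx], scores[best_idx], scores_std[best_idx]))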
|
bsd-3-clause
|
jreback/pandas
|
pandas/tests/frame/methods/test_combine.py
|
8
|
1359
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestCombine:
@pytest.mark.parametrize(
"data",
[
pd.date_range("2000", periods=4),
pd.date_range("2000", periods=4, tz="US/Central"),
pd.period_range("2000", periods=4),
pd.timedelta_range(0, periods=4),
],
)
def test_combine_datetlike_udf(self, data):
# GH#23079
df = pd.DataFrame({"A": data})
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
def test_combine_generic(self, float_frame):
df1 = float_frame
df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined["D"].isna().all()
assert combined2["D"].isna().all()
chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]]
chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]]
exp = (
float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk)
* 2
)
tm.assert_frame_equal(chunk, exp)
tm.assert_frame_equal(chunk2, exp)
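# How DataFrame.combine applies a udf such as combiner() above (a sketch with
# made-up frames): the function is called once per column with a Series from
# each frame and the results are reassembled into a DataFrame, e.g.
#
#     df1 = pd.DataFrame({"A": [0, 4]})
#     df2 = pd.DataFrame({"A": [1, 3]})
#     df1.combine(df2, np.minimum)       # elementwise minimum -> A == [0, 3]
#     df1.combine(df2, lambda a, b: b)   # 'take other', like combiner() above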
|
bsd-3-clause
|
bmazin/SDR
|
Projects/BestBeammap/beamPlot.py
|
2
|
2542
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui
import matplotlib.pyplot as plt
import numpy as np
import sys
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
def getList(file):
posList= np.recfromtxt(file)
l = [posList['f0'],posList['f2'],posList['f3']]
l = np.array(l)
l = l.T
names = posList['f4']
return l,names
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Beammap Chooser')
self.create_main_frame()
self.create_status_bar()
def lookupPoint(self,event):
x = event.xdata
y = event.ydata
if y != None and x != None and self.mpl_toolbar.mode == '':
iClosestLeft = np.argmin((x-self.left[:,1])**2+(y-self.left[:,2])**2)
print 'Closest Pixel:',self.leftNames[iClosestLeft],self.left[iClosestLeft]
self.axes0.scatter(self.left[iClosestLeft,1],self.left[iClosestLeft,2],alpha=0.9,s=100,label='left',marker='o',color='c')
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
self.dpi = 100
self.fig = Figure((15.0, 15.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.axes0 = self.fig.add_subplot(111)
cid=self.canvas.mpl_connect('button_press_event', self.lookupPoint)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("Awaiting orders.")
self.statusBar().addWidget(self.status_text, 1)
def plot_beammap(self):
self.left,self.leftNames = getList('freq_atten_x_y_swap.txt')
self.axes0.scatter(self.left[:,1],self.left[:,2],marker='o',alpha=0.5,s=100,label='left',color='b')
self.canvas.draw()
def main():
app = QApplication(sys.argv)
form = AppForm()
form.plot_beammap()
form.show()
app.exec_()
if __name__ == "__main__":
main()
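# The nearest-pixel lookup in AppForm.lookupPoint, restated as a standalone
# sketch (columns assumed to be [freq, x, y] as packed by getList):
#
#     d2 = (x - left[:, 1]) ** 2 + (y - left[:, 2]) ** 2
#     i = np.argmin(d2)       # index of the closest beammap pixel
#     print leftNames[i], left[i]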
|
gpl-2.0
|
ZhangBioinfoLab/HMM-TilingArray-CRC
|
tools/analysis/genomic_annotation.py
|
1
|
3012
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 2015
@author: junjiang Lin
@contact: [email protected]
"""
# This python script is used to summarize the genomic annotation
#
# Input List:
# 0. regions to annotate, bed format
# 1. exon mapping file, -wa -wb in bedtools
# 2. intron mapping file, -wa -wb in bedtools
# 3. promotor mapping file, -wa -wb in bedtools
# 4. intergenic mapping file, -v in bedtools
# 5. output prefix
#
# Output List:
# 1. a tab-delimited file summarizing all information
#
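# Example invocation (a sketch; file names are hypothetical and the bedtools
# commands mirror the -wa -wb / -v conventions listed above):
#
#   bedtools intersect -wa -wb -a regions.bed -b exons.bed > exon_map.txt
#   bedtools intersect -wa -wb -a regions.bed -b introns.bed > intron_map.txt
#   bedtools intersect -wa -wb -a regions.bed -b promotors.bed > promotor_map.txt
#   bedtools intersect -v -a regions.bed -b genes.bed > intergenic_map.txt
#   python genomic_annotation.py regions.bed -e exon_map.txt -i intron_map.txt \
#       -p promotor_map.txt -g intergenic_map.txt sample1
#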
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg') #without using GUI or X server
import seaborn as sb
import matplotlib.pyplot as plt
def main():
import argparse
parser = argparse.ArgumentParser(description='summarize mapping files')
parser.add_argument("regions",help="regions to annotate")
parser.add_argument("-e","--exon",help="the mapping file for exon generated from bedtools")
parser.add_argument("-i","--intron",help="the mapping file for intron generated from bedtools")
parser.add_argument("-g","--intergenic",help="the mapping file for intergenic generated from bedtools")
parser.add_argument("-p","--promotor",help="the mapping file for promotor generated from bedtools")
parser.add_argument("output",help="prefix for output file")
args = parser.parse_args()
regions = args.regions
exon = args.exon
intron = args.intron
intergenic = args.intergenic
promotor = args.promotor
output = args.output
# Start building summary dataframe
summary_df = pd.read_table(regions,header=None)
    summary_df[3] = summary_df[0] + ',' + summary_df[1].astype(str) + '-' + summary_df[2].astype(str)
summary_df = summary_df.iloc[:,:4]
summary_df.rename(columns={0:'chr',1:'start',2:'end',3:'name'},inplace=True)
summary_df.set_index(keys='name',inplace=True)
# go through each mapping file
colors = []
if promotor:
promotor_df = pd.read_table(promotor,header=None)
summary_df['promotor'] = promotor_df[3].value_counts()
colors.append('gold')
if exon:
exon_df = pd.read_table(exon,header=None)
summary_df['exon'] = exon_df[3].value_counts()
colors.append('red')
if intron:
intron_df = pd.read_table(intron,header=None)
summary_df['intron'] = intron_df[3].value_counts()
colors.append('green')
if intergenic:
intergenic_df = pd.read_table(intergenic,header=None)
summary_df['intergenic'] = intergenic_df[3].value_counts()
colors.append('blue')
#post processing
summary_df.fillna(value=0,inplace=True)
class_dict = dict(zip(range(7),summary_df.columns))
class_index = np.argmax(np.array(summary_df.iloc[:,3:]),axis=1)+3
summary_df['class'] = [class_dict[i] for i in class_index]
summary_df.to_csv(output+"_annotation.tsv",sep="\t")
summary_df['class'].value_counts()[['promotor','intron','exon','intergenic']].plot(kind='pie',autopct='%1.1f%%',shadow=True,startangle=45,colors=colors)
plt.axis('equal')
plt.title(output+" genomic annotation")
plt.savefig(output+"_annotation.pdf")
if __name__ == "__main__":
main()
|
gpl-2.0
|
radekosmulski/courses
|
deeplearning2/kmeans.py
|
10
|
2802
|
import tensorflow as tf
import math, numpy as np
import matplotlib.pyplot as plt
def plot_data(centroids, data, n_samples):
colour = plt.cm.rainbow(np.linspace(0,1,len(centroids)))
for i, centroid in enumerate(centroids):
samples = data[i*n_samples:(i+1)*n_samples]
plt.scatter(samples[:,0], samples[:,1], c=colour[i], s=1)
plt.plot(centroid[0], centroid[1], markersize=10, marker="x", color='k', mew=5)
plt.plot(centroid[0], centroid[1], markersize=5, marker="x", color='m', mew=2)
def all_distances(a, b):
diff = tf.squared_difference(tf.expand_dims(a, 0), tf.expand_dims(b,1))
return tf.reduce_sum(diff, axis=2)
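# Shape sketch for all_distances: with a of shape (n, d) and b of shape
# (m, d), the expand_dims calls give (1, n, d) and (m, 1, d), the squared
# difference broadcasts to (m, n, d), and the reduce_sum over axis=2 leaves an
# (m, n) matrix of squared Euclidean distances; the NumPy equivalent would be
# ((a[None, :, :] - b[:, None, :]) ** 2).sum(axis=2).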
class Kmeans(object):
def __init__(self, data, n_clusters):
self.n_data, self.n_dim = data.shape
self.n_clusters = n_clusters
self.data = data
self.v_data = tf.Variable(data)
self.n_samples = self.n_data//self.n_clusters
def run(self):
tf.global_variables_initializer().run()
initial_centroids = self.find_initial_centroids(self.n_clusters).eval()
curr_centroids = tf.Variable(initial_centroids)
nearest_indices = self.assign_to_nearest(curr_centroids)
updated_centroids = self.update_centroids(nearest_indices)
# Begin main algorithm
tf.global_variables_initializer().run()
c = initial_centroids
for i in range(10):
c2 = curr_centroids.assign(updated_centroids).eval()
if np.allclose(c,c2): break
c=c2
return c2
def find_initial_centroids(self, k):
r_index = tf.random_uniform([1], 0, self.n_data, dtype=tf.int32)
r = tf.expand_dims(self.v_data[tf.squeeze(r_index)], dim=1)
initial_centroids = []
for i in range(k):
dist = all_distances(self.v_data, r)
farthest_index = tf.argmax(tf.reduce_min(dist, axis=0), 0)
farthest_point = self.v_data[tf.to_int32(farthest_index)]
initial_centroids.append(farthest_point)
r = tf.stack(initial_centroids)
return r
def choose_random_centroids(self):
        n_samples = tf.shape(self.v_data)[0]
random_indices = tf.random_shuffle(tf.range(0, n_samples))
centroid_indices = random_indices[:self.n_clusters]
return tf.gather(self.v_data, centroid_indices)
def assign_to_nearest(self, centroids):
return tf.argmin(all_distances(self.v_data, centroids), 0)
def update_centroids(self, nearest_indices):
partitions = tf.dynamic_partition(self.v_data, tf.to_int32(nearest_indices), self.n_clusters)
return tf.concat([tf.expand_dims(tf.reduce_mean(partition, 0), 0)
for partition in partitions], 0)
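# Minimal usage sketch (assumes the graph-mode TensorFlow 1.x API used above;
# the synthetic blobs below are made up for illustration only).
if __name__ == '__main__':
    n_clusters, n_samples = 4, 250
    true_centroids = np.random.uniform(-5, 5, (n_clusters, 2))
    data = np.vstack([c + 0.5 * np.random.randn(n_samples, 2)
                      for c in true_centroids])
    with tf.Session():
        k = Kmeans(data, n_clusters)
        found_centroids = k.run()
    plot_data(found_centroids, data, n_samples)
    plt.show()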
|
apache-2.0
|
Erotemic/hotspotter
|
hotspotter/report_results2.py
|
1
|
41367
|
#!/usr/env python
from __future__ import division, print_function
from hscom import __common__
(print, print_, print_on, print_off, rrr, profile, printDBG) =\
__common__.init(__name__, '[rr2]', DEBUG=False)
# Matplotlib
import matplotlib
matplotlib.use('Qt4Agg')
# Python
import os
import sys
import textwrap
import fnmatch
import warnings
from itertools import izip
from os.path import join, exists
# Scientific imports
import numpy as np
# Hotspotter imports
from hscom import fileio as io
from hscom import helpers as util
from hscom import params
from hscom.Printable import DynStruct
from hsviz import draw_func2 as df2
from hsviz import viz
from hsviz import allres_viz
import load_data2 as ld2
import spatial_verification2 as sv2
#import datetime
#import subprocess
REPORT_MATRIX = True
REPORT_MATRIX_VIZ = True
# ========================================================
# Report result initialization
# ========================================================
class AllResults(DynStruct):
'Data container for all compiled results'
def __init__(self, hs, qcx2_res, qcx_list):
super(DynStruct, self).__init__()
self.hs = hs
self.qcx2_res = qcx2_res
self.qcx_list = hs.test_sample_cx if qcx_list is None else qcx_list
self.rankres_str = None
self.title_suffix = None
self.scalar_mAP_str = '# mAP score = NA\n'
self.scalar_summary = None
self.problem_false_pairs = None
self.problem_true_pairs = None
self.greater1_cxs = None
self.greater5_cxs = None
self.matrix_str = None
def get_orgres2_distances(allres, *args, **kwargs):
return _get_orgres2_distances(allres, *args, **kwargs)
def __str__(allres):
#print = tores.append
hs = allres.hs
toret = ('+======================\n')
scalar_summary = str(allres.scalar_summary).strip()
toret += ('| All Results: %s \n' % hs.get_db_name())
toret += ('| title_suffix=%s\n' % str(allres.title_suffix))
toret += ('| scalar_summary=\n%s\n' % util.indent(scalar_summary, '| '))
toret += ('| ' + str(allres.scalar_mAP_str))
toret += ('|---\n')
toret += ('| greater5_%s \n' % (hs.cidstr(allres.greater5_cxs),))
toret += ('|---\n')
toret += ('| greater1_%s \n' % (hs.cidstr(allres.greater1_cxs),))
toret += ('|---\n')
toret += ('+======================.\n')
#toret+=('| problem_false_pairs=\n%r' % allres.problem_false_pairs)
#toret+=('| problem_true_pairs=\n%r' % allres.problem_true_pairs)
return toret
class OrganizedResult(DynStruct):
'''
Maintains an organized list of query chip indexes, their top matching
result, the score, and the rank. What chips are populated depends on the
type of organization
'''
def __init__(self):
super(DynStruct, self).__init__()
self.qcxs = []
self.cxs = []
self.scores = []
self.ranks = []
def append(self, qcx, cx, rank, score):
self.qcxs.append(qcx)
self.cxs.append(cx)
self.scores.append(score)
self.ranks.append(rank)
def __len__(self):
num_qcxs = len(self.qcxs)
num_cxs = len(self.cxs)
num_scores = len(self.scores)
num_ranks = len(self.ranks)
assert num_qcxs == num_cxs
assert num_cxs == num_scores
assert num_scores == num_ranks
return num_qcxs
def iter(self):
'useful for plotting'
result_iter = izip(self.qcxs, self.cxs, self.scores, self.ranks)
for qcx, cx, score, rank in result_iter:
yield qcx, cx, score, rank
def qcx_arrays(self, hs):
'useful for reportres_str'
cx2_cid = hs.tables.cx2_cid
qcx2_rank = np.zeros(len(cx2_cid)) - 2
qcx2_score = np.zeros(len(cx2_cid)) - 2
qcx2_cx = np.arange(len(cx2_cid)) * -1
#---
for (qcx, cx, score, rank) in self.iter():
qcx2_rank[qcx] = rank
qcx2_score[qcx] = score
qcx2_cx[qcx] = cx
return qcx2_rank, qcx2_score, qcx2_cx
def printme3(self):
for qcx, cx, score, rank in self.iter():
print('%4d %4d %6.1f %4d' % (qcx, cx, score, rank))
def get_false_match_distances(allres):
false_distances = get_orgres_match_distances(allres, 'false')
return false_distances
def get_true_match_distances(allres):
true_distances = get_orgres_match_distances(allres, 'true')
return true_distances
def res2_true_and_false(hs, res):
'''
Organizes results into true positive and false positive sets
a set is a query, its best match, and a score
'''
#if not 'res' in vars():
#res = qcx2_res[qcx]
indx_samp = hs.indexed_sample_cx
qcx = res.qcx
cx2_score = res.cx2_score
unfilt_top_cx = np.argsort(cx2_score)[::-1]
# Get top chip indexes and scores
top_cx = np.array(util.intersect_ordered(unfilt_top_cx, indx_samp))
top_score = cx2_score[top_cx]
# Get the true and false ground truth ranks
qnx = hs.tables.cx2_nx[qcx]
if qnx <= 1:
        qnx = -1 # disallow unidentified animals from being marked as true
top_nx = hs.tables.cx2_nx[top_cx]
true_ranks = np.where(np.logical_and(top_nx == qnx, top_cx != qcx))[0]
false_ranks = np.where(np.logical_and(top_nx != qnx, top_cx != qcx))[0]
# Construct the true positive tuple
true_scores = top_score[true_ranks]
true_cxs = top_cx[true_ranks]
true_tup = (true_cxs, true_scores, true_ranks)
# Construct the false positive tuple
false_scores = top_score[false_ranks]
false_cxs = top_cx[false_ranks]
false_tup = (false_cxs, false_scores, false_ranks)
# Return tuples
return true_tup, false_tup
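# The tuples returned above unpack as parallel arrays and are consumed the
# same way _organize_result does below, e.g. (a sketch):
#     true_tup, false_tup = res2_true_and_false(hs, res)
#     true_cxs, true_scores, true_ranks = true_tup
#     for cx, score, rank in zip(*true_tup):
#         pass  # one ground-truth match per iteration, best rank first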
def init_organized_results(allres):
print('[rr2] init_organized_results()')
hs = allres.hs
qcx2_res = allres.qcx2_res
allres.true = OrganizedResult()
allres.false = OrganizedResult()
allres.top_true = OrganizedResult()
allres.top_false = OrganizedResult()
allres.bot_true = OrganizedResult()
allres.problem_true = OrganizedResult()
allres.problem_false = OrganizedResult()
# -----------------
# Query result loop
def _organize_result(res):
# Use ground truth to sort into true/false
true_tup, false_tup = res2_true_and_false(hs, res)
last_rank = -1
skipped_ranks = set([])
# Record: all_true, missed_true, top_true, bot_true
topx = 0
for cx, score, rank in zip(*true_tup):
allres.true.append(qcx, cx, rank, score)
if rank - last_rank > 1:
skipped_ranks.add(rank - 1)
allres.problem_true.append(qcx, cx, rank, score)
if topx == 0:
allres.top_true.append(qcx, cx, rank, score)
last_rank = rank
topx += 1
if topx > 1:
allres.bot_true.append(qcx, cx, rank, score)
# Record the all_false, false_positive, top_false
topx = 0
for cx, score, rank in zip(*false_tup):
allres.false.append(qcx, cx, rank, score)
if rank in skipped_ranks:
allres.problem_false.append(qcx, cx, rank, score)
if topx == 0:
allres.top_false.append(qcx, cx, rank, score)
topx += 1
for qcx in allres.qcx_list:
res = qcx2_res[qcx]
if res is not None:
_organize_result(res)
#print('[rr2] len(allres.true) = %r' % len(allres.true))
#print('[rr2] len(allres.false) = %r' % len(allres.false))
#print('[rr2] len(allres.top_true) = %r' % len(allres.top_true))
#print('[rr2] len(allres.top_false) = %r' % len(allres.top_false))
#print('[rr2] len(allres.bot_true) = %r' % len(allres.bot_true))
#print('[rr2] len(allres.problem_true) = %r' % len(allres.problem_true))
#print('[rr2] len(allres.problem_false) = %r' % len(allres.problem_false))
# qcx arrays for ttbttf
allres.top_true_qcx_arrays = allres.top_true.qcx_arrays(hs)
allres.bot_true_qcx_arrays = allres.bot_true.qcx_arrays(hs)
allres.top_false_qcx_arrays = allres.top_false.qcx_arrays(hs)
def init_score_matrix(allres):
print('[rr2] init score matrix')
hs = allres.hs
qcx2_res = allres.qcx2_res
qcx_list = allres.qcx_list
nx_list = np.unique(hs.tables.cx2_nx[qcx_list])
#nx_list = hs.get_valid_nxs(unknown=False)
cxs_list = hs.nx2_cxs(nx_list, aslist=True)
# Sort names by number of chips
nx_size = map(len, cxs_list)
# Build sorted chip list
nx_cxs_tuples = zip(nx_size, cxs_list)
# Sort by name
cx_sorted = [x for (y, x) in sorted(nx_cxs_tuples)]
# Subsort by chip
cx_sorted = map(sorted, cx_sorted)
    # Flatten
from itertools import chain
cx_sorted = list(chain.from_iterable(cx_sorted)) # very fast flatten
row_label_cx = []
row_scores = []
qcx_set = set(qcx_list)
# Build each row in the score matrix
for qcx in iter(cx_sorted):
if not qcx in qcx_set:
continue
try:
res = qcx2_res[qcx]
except IndexError:
print('qcx = %r' % qcx)
print('len(qcx2_res) = %r' % len(qcx2_res))
raise
if res is None:
continue
# Append a label to score matrix
row_label_cx.append(qcx)
# Append a column to score matrix
row_scores.append(res.cx2_score[cx_sorted])
col_label_cx = cx_sorted
# convert to numpy matrix array
score_matrix = np.array(row_scores, dtype=np.float64)
# Fill diagonal with -1's
np.fill_diagonal(score_matrix, -np.ones(len(row_label_cx)))
# Add score matrix to allres
allres.score_matrix = score_matrix
allres.col_label_cx = col_label_cx
allres.row_label_cx = row_label_cx
def get_title_suffix(hs):
title_suffix = hs.get_cache_uid()
return title_suffix
def init_allres(hs, qcx2_res,
qcx_list=None,
matrix=(REPORT_MATRIX or REPORT_MATRIX_VIZ),
oxford=False,
**kwargs):
'Organizes results into a visualizable data structure'
# Make AllResults data containter
allres = AllResults(hs, qcx2_res, qcx_list)
allres.title_suffix = get_title_suffix(hs)
#util.ensurepath(allres.summary_dir)
print('[rr2] init_allres()')
#---
hs = allres.hs
qcx2_res = allres.qcx2_res
#cx2_cid = hs.tables.cx2_cid
# Initialize
if matrix:
init_score_matrix(allres)
init_organized_results(allres)
# Build
build_rankres_str(allres)
if matrix:
build_matrix_str(allres)
if oxford is True:
import oxsty_results
oxsty_map_csv, scalar_mAP_str = oxsty_results.oxsty_mAP_results(allres)
allres.scalar_mAP_str = scalar_mAP_str
allres.oxsty_map_csv = oxsty_map_csv
#print(allres)
return allres
# ========================================================
# Build textfile result strings
# ========================================================
def build_matrix_str(allres):
hs = allres.hs
cx2_gx = hs.tables.cx2_gx
gx2_gname = hs.tables.gx2_gname
def cx2_gname(cx):
        return [os.path.splitext(gname)[0] for gname in gx2_gname[cx2_gx[cx]]]
col_label_gname = cx2_gname(allres.col_label_cx)
row_label_gname = cx2_gname(allres.row_label_cx)
timestamp = util.get_timestamp(format_='comment') + '\n'
header = '\n'.join(
['# Result score matrix',
'# Generated on: ' + timestamp,
'# Format: rows separated by newlines, cols separated by commas',
'# num_queries / rows = ' + repr(len(row_label_gname)),
'# num_indexed / cols = ' + repr(len(col_label_gname)),
'# row_labels = ' + repr(row_label_gname),
'# col_labels = ' + repr(col_label_gname)])
row_strings = []
for row in allres.score_matrix:
row_str = map(lambda x: '%5.2f' % x, row)
row_strings.append(', '.join(row_str))
body = '\n'.join(row_strings)
matrix_str = '\n'.join([header, body])
allres.matrix_str = matrix_str
def build_rankres_str(allres):
'Builds csv files showing the cxs/scores/ranks of the query results'
hs = allres.hs
#qcx2_res = allres.qcx2_res
cx2_cid = hs.tables.cx2_cid
#cx2_nx = hs.tables.cx2_nx
test_samp = allres.qcx_list
train_samp = hs.train_sample_cx
indx_samp = hs.indexed_sample_cx
# Get organized data for csv file
(qcx2_top_true_rank,
qcx2_top_true_score,
qcx2_top_true_cx) = allres.top_true_qcx_arrays
(qcx2_bot_true_rank,
qcx2_bot_true_score,
qcx2_bot_true_cx) = allres.bot_true_qcx_arrays
(qcx2_top_false_rank,
qcx2_top_false_score,
qcx2_top_false_cx) = allres.top_false_qcx_arrays
# Number of groundtruth per query
qcx2_numgt = np.zeros(len(cx2_cid)) - 2
for qcx in test_samp:
qcx2_numgt[qcx] = len(hs.get_other_indexed_cxs(qcx))
# Easy to digest results
num_chips = len(test_samp)
num_nonquery = len(np.setdiff1d(indx_samp, test_samp))
# Find the test samples WITH ground truth
test_samp_with_gt = np.array(test_samp)[qcx2_numgt[test_samp] > 0]
if len(test_samp_with_gt) == 0:
warnings.warn('[rr2] there were no queries with ground truth')
#train_nxs_set = set(cx2_nx[train_samp])
flag_cxs_fn = hs.flag_cxs_with_name_in_sample
def ranks_less_than_(thresh, intrain=None):
#Find the number of ranks scoring more than thresh
# Get statistics with respect to the training set
if len(test_samp_with_gt) == 0:
test_cxs_ = np.array([])
elif intrain is None: # report all
test_cxs_ = test_samp_with_gt
else: # report either or
in_train_flag = flag_cxs_fn(test_samp_with_gt, train_samp)
if intrain is False:
                in_train_flag = np.logical_not(in_train_flag)
test_cxs_ = test_samp_with_gt[in_train_flag]
# number of test samples with ground truth
num_with_gt = len(test_cxs_)
if num_with_gt == 0:
return [], ('NoGT', 'NoGT', -1, 'NoGT')
# find tests with ranks greater and less than thresh
testcx2_ttr = qcx2_top_true_rank[test_cxs_]
greater_cxs = test_cxs_[np.where(testcx2_ttr >= thresh)[0]]
num_greater = len(greater_cxs)
num_less = num_with_gt - num_greater
num_greater = num_with_gt - num_less
frac_less = 100.0 * num_less / num_with_gt
fmt_tup = (num_less, num_with_gt, frac_less, num_greater)
return greater_cxs, fmt_tup
greater5_cxs, fmt5_tup = ranks_less_than_(5)
greater1_cxs, fmt1_tup = ranks_less_than_(1)
#
gt5_intrain_cxs, fmt5_in_tup = ranks_less_than_(5, intrain=True)
gt1_intrain_cxs, fmt1_in_tup = ranks_less_than_(1, intrain=True)
#
gt5_outtrain_cxs, fmt5_out_tup = ranks_less_than_(5, intrain=False)
gt1_outtrain_cxs, fmt1_out_tup = ranks_less_than_(1, intrain=False)
#
allres.greater1_cxs = greater1_cxs
allres.greater5_cxs = greater5_cxs
#print('greater5_cxs = %r ' % (allres.greater5_cxs,))
#print('greater1_cxs = %r ' % (allres.greater1_cxs,))
# CSV Metadata
header = '# Experiment allres.title_suffix = ' + allres.title_suffix + '\n'
header += util.get_timestamp(format_='comment') + '\n'
# Scalar summary
scalar_summary = '# Num Query Chips: %d \n' % num_chips
scalar_summary += '# Num Query Chips with at least one match: %d \n' % len(test_samp_with_gt)
scalar_summary += '# Num NonQuery Chips: %d \n' % num_nonquery
scalar_summary += '# Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_tup)
scalar_summary += '# Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_tup)
scalar_summary += '# InTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_in_tup)
scalar_summary += '# InTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_in_tup)
scalar_summary += '# OutTrain Ranks <= 5: %r/%r = %.1f%% (missed %r)\n' % (fmt5_out_tup)
scalar_summary += '# OutTrain Ranks <= 1: %r/%r = %.1f%% (missed %r)\n\n' % (fmt1_out_tup)
header += scalar_summary
# Experiment parameters
#header += '# Full Parameters: \n' + util.indent(params.param_string(), '#') + '\n\n'
# More Metadata
header += textwrap.dedent('''
# Rank Result Metadata:
# QCX = Query chip-index
# QGNAME = Query images name
# NUMGT = Num ground truth matches
# TT = top true
# BT = bottom true
# TF = top false''').strip()
# Build the CSV table
test_sample_gx = hs.tables.cx2_gx[test_samp]
test_sample_gname = hs.tables.gx2_gname[test_sample_gx]
test_sample_gname = [g.replace('.jpg', '') for g in test_sample_gname]
column_labels = ['QCX', 'NUM GT',
'TT CX', 'BT CX', 'TF CX',
'TT SCORE', 'BT SCORE', 'TF SCORE',
'TT RANK', 'BT RANK', 'TF RANK',
'QGNAME', ]
column_list = [
test_samp, qcx2_numgt[test_samp],
qcx2_top_true_cx[test_samp], qcx2_bot_true_cx[test_samp],
qcx2_top_false_cx[test_samp], qcx2_top_true_score[test_samp],
qcx2_bot_true_score[test_samp], qcx2_top_false_score[test_samp],
qcx2_top_true_rank[test_samp], qcx2_bot_true_rank[test_samp],
qcx2_top_false_rank[test_samp], test_sample_gname, ]
column_type = [int, int, int, int, int,
float, float, float, int, int, int, str, ]
rankres_str = ld2.make_csv_table(column_labels, column_list, header, column_type)
# Put some more data at the end
problem_true_pairs = zip(allres.problem_true.qcxs, allres.problem_true.cxs)
problem_false_pairs = zip(allres.problem_false.qcxs, allres.problem_false.cxs)
problem_str = '\n'.join( [
'#Problem Cases: ',
'# problem_true_pairs = ' + repr(problem_true_pairs),
'# problem_false_pairs = ' + repr(problem_false_pairs)])
rankres_str += '\n' + problem_str
# Attach results to allres structure
allres.rankres_str = rankres_str
allres.scalar_summary = scalar_summary
allres.problem_false_pairs = problem_false_pairs
allres.problem_true_pairs = problem_true_pairs
allres.problem_false_pairs = problem_false_pairs
allres.problem_true_pairs = problem_true_pairs
# ===========================
# Helper Functions
# ===========================
def __dump_text_report(allres, report_type):
if not 'report_type' in vars():
report_type = 'rankres_str'
print('[rr2] Dumping textfile: ' + report_type)
report_str = allres.__dict__[report_type]
# Get directories
result_dir = allres.hs.dirs.result_dir
timestamp_dir = join(result_dir, 'timestamped_results')
util.ensurepath(timestamp_dir)
util.ensurepath(result_dir)
# Write to timestamp and result dir
timestamp = util.get_timestamp()
csv_timestamp_fname = report_type + allres.title_suffix + timestamp + '.csv'
csv_timestamp_fpath = join(timestamp_dir, csv_timestamp_fname)
csv_fname = report_type + allres.title_suffix + '.csv'
csv_fpath = join(result_dir, csv_fname)
util.write_to(csv_fpath, report_str)
util.write_to(csv_timestamp_fpath, report_str)
# ===========================
# Driver functions
# ===========================
TMP = False
SCORE_PDF = TMP
RANK_HIST = TMP
PARI_ANALY = TMP
STEM = TMP
TOP5 = TMP
#if TMP:
ALLQUERIES = False
ANALYSIS = True
def dump_all(allres,
matrix=REPORT_MATRIX, #
matrix_viz=REPORT_MATRIX_VIZ, #
score_pdf=SCORE_PDF,
rank_hist=RANK_HIST,
ttbttf=False,
problems=False,
gtmatches=False,
oxford=False,
no_viz=False,
rankres=True,
stem=STEM,
missed_top5=TOP5,
analysis=ANALYSIS,
pair_analysis=PARI_ANALY,
allqueries=ALLQUERIES):
print('\n======================')
print('[rr2] DUMP ALL')
print('======================')
viz.BROWSE = False
viz.DUMP = True
# Text Reports
if rankres:
dump_rankres_str_results(allres)
if matrix:
dump_matrix_str_results(allres)
if oxford:
dump_oxsty_mAP_results(allres)
if no_viz:
print('\n --- (NO VIZ) END DUMP ALL ---\n')
return
# Viz Reports
if stem:
dump_rank_stems(allres)
if matrix_viz:
dump_score_matrixes(allres)
if rank_hist:
dump_rank_hists(allres)
if score_pdf:
dump_score_pdfs(allres)
#
#if ttbttf:
#dump_ttbttf_matches(allres)
if problems:
dump_problem_matches(allres)
if gtmatches:
dump_gt_matches(allres)
if missed_top5:
dump_missed_top5(allres)
if analysis:
dump_analysis(allres)
if pair_analysis:
dump_feature_pair_analysis(allres)
if allqueries:
dump_all_queries(allres)
print('\n --- END DUMP ALL ---\n')
def dump_oxsty_mAP_results(allres):
#print('\n---DUMPING OXSTYLE RESULTS---')
__dump_text_report(allres, 'oxsty_map_csv')
def dump_rankres_str_results(allres):
#print('\n---DUMPING RANKRES RESULTS---')
__dump_text_report(allres, 'rankres_str')
def dump_matrix_str_results(allres):
#print('\n---DUMPING MATRIX STRING RESULTS---')
__dump_text_report(allres, 'matrix_str')
def dump_problem_matches(allres):
#print('\n---DUMPING PROBLEM MATCHES---')
dump_orgres_matches(allres, 'problem_false')
dump_orgres_matches(allres, 'problem_true')
def dump_score_matrixes(allres):
#print('\n---DUMPING SCORE MATRIX---')
try:
allres_viz.plot_score_matrix(allres)
except Exception as ex:
print('[dump_score_matixes] IMPLEMENTME: %r ' % ex)
pass
def dump_rank_stems(allres):
#print('\n---DUMPING RANK STEMS---')
viz.plot_rank_stem(allres, 'true')
def dump_rank_hists(allres):
#print('\n---DUMPING RANK HISTS---')
viz.plot_rank_histogram(allres, 'true')
def dump_score_pdfs(allres):
#print('\n---DUMPING SCORE PDF ---')
viz.plot_score_pdf(allres, 'true', colorx=0.0, variation_truncate=True)
viz.plot_score_pdf(allres, 'false', colorx=0.2)
viz.plot_score_pdf(allres, 'top_true', colorx=0.4, variation_truncate=True)
viz.plot_score_pdf(allres, 'bot_true', colorx=0.6)
viz.plot_score_pdf(allres, 'top_false', colorx=0.9)
def dump_gt_matches(allres):
#print('\n---DUMPING GT MATCHES ---')
'Displays the matches to ground truth for all queries'
qcx2_res = allres.qcx2_res
for qcx in xrange(0, len(qcx2_res)):
viz.show_chip(allres, qcx, 'gt_matches')
def dump_missed_top5(allres):
#print('\n---DUMPING MISSED TOP 5---')
'Displays the top5 matches for all queries'
greater5_cxs = allres.greater5_cxs
#qcx = greater5_cxs[0]
for qcx in greater5_cxs:
viz.show_chip(allres, qcx, 'top5', 'missed_top5')
viz.show_chip(allres, qcx, 'gt_matches', 'missed_top5')
def dump_analysis(allres):
print('[rr2] dump analysis')
greater1_cxs = allres.greater1_cxs
#qcx = greater5_cxs[0]
for qcx in greater1_cxs:
viz.show_chip(allres, qcx, 'analysis', 'analysis')
viz.show_chip(allres, qcx, 'analysis', 'analysis', annotations=False, title_aug=' noanote')
def dump_all_queries2(hs):
import QueryResult as qr
test_cxs = hs.test_sample_cx
title_suffix = get_title_suffix(hs)
print('[rr2] dumping all %r queries' % len(test_cxs))
for qcx in test_cxs:
res = qr.QueryResult(qcx)
res.load(hs)
        # SUPER HACK (I don't know the figure name a priori, I have to construct
        # it to avoid dumping a duplicate figure)
title_aug = ' noanote'
fpath = hs.dirs.result_dir
subdir = 'allqueries'
N = 5
topN_cxs = res.topN_cxs(N)
topscore = res.cx2_score[topN_cxs][0]
dump_dir = join(fpath, subdir + title_suffix)
fpath = join(dump_dir, ('topscore=%r -- qcid=%r' % (topscore, res.qcid)))
fpath_aug = join(dump_dir, ('topscore=%r -- qcid=%r' % (topscore, res.qcid))) + title_aug
fpath_clean = df2.sanatize_img_fpath(fpath)
fpath_aug_clean = df2.sanatize_img_fpath(fpath_aug)
print('----')
print(fpath_clean)
        print(fpath_aug_clean)
if not exists(fpath_aug_clean):
viz.plot_cx2(hs, res, 'analysis', subdir=subdir, annotations=False, title_aug=title_aug)
if not exists(fpath_clean):
viz.plot_cx2(hs, res, 'analysis', subdir=subdir)
print('----')
def dump_all_queries(allres):
test_cxs = allres.qcx_list
print('[rr2] dumping all %r queries' % len(test_cxs))
for qcx in test_cxs:
viz.show_chip(allres, qcx, 'analysis', subdir='allqueries',
annotations=False, title_aug=' noanote')
viz.show_chip(allres, qcx, 'analysis', subdir='allqueries')
def dump_orgres_matches(allres, orgres_type):
orgres = allres.__dict__[orgres_type]
hs = allres.hs
qcx2_res = allres.qcx2_res
# loop over each query / result of interest
for qcx, cx, score, rank in orgres.iter():
query_gname, _ = os.path.splitext(hs.tables.gx2_gname[hs.tables.cx2_gx[qcx]])
result_gname, _ = os.path.splitext(hs.tables.gx2_gname[hs.tables.cx2_gx[cx]])
res = qcx2_res[qcx]
df2.figure(fnum=1, plotnum=121)
df2.show_matches_annote_res(res, hs, cx, fnum=1, plotnum=121)
big_title = 'score=%.2f_rank=%d_q=%s_r=%s' % (score, rank, query_gname, result_gname)
df2.set_figtitle(big_title)
viz.__dump_or_browse(allres, orgres_type + '_matches' + allres.title_suffix)
def dump_feature_pair_analysis(allres):
print('[rr2] Doing: feature pair analysis')
# TODO: Measure score consistency over a spatial area.
# Measures entropy of matching vs nonmatching descriptors
# Measures scale of m vs nm desc
hs = allres.hs
qcx2_res = allres.qcx2_res
import scipy
def _hist_prob_x(desc, bw_factor):
# Choose number of bins based on the bandwidth
bin_range = (0, 256) # assuming input is uint8
bins = bin_range[1] // bw_factor
bw_factor = bin_range[1] / bins
# Compute the probabilty mass function, each w.r.t a single descriptor
hist_params = dict(bins=bins, range=bin_range, density=True)
hist_func = np.histogram
desc_pmf = [hist_func(d, **hist_params)[0] for d in desc]
# Compute the probability that you saw what you saw
# TODO: could use linear interpolation for a bit more robustness here
bin_vals = [np.array(np.floor(d / bw_factor), dtype=np.uint8) for d in desc]
hist_prob_x = [pmf[vals] for pmf, vals in zip(desc_pmf, bin_vals)]
return hist_prob_x
def _gkde_prob_x(desc, bw_factor):
# Estimate the probabilty density function, each w.r.t a single descriptor
gkde_func = scipy.stats.gaussian_kde
desc_pdf = [gkde_func(d, bw_factor) for d in desc]
gkde_prob_x = [pdf(d) for pdf, d in zip(desc_pdf, desc)]
return gkde_prob_x
def descriptor_entropy(desc, bw_factor=4):
'computes the shannon entropy of each descriptor in desc'
# Compute shannon entropy = -sum(p(x)*log(p(x)))
prob_x = _hist_prob_x(desc, bw_factor)
entropy = [-(px * np.log2(px)).sum() for px in prob_x]
return entropy
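    # Reference point for the formula above (plain math, not specific to any
    # descriptor): a fair coin with p = [0.5, 0.5] has entropy
    # -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1 bit, and the entropy falls to 0 as
    # the mass concentrates on a single outcome.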
# Load features if we need to
if hs.feats.cx2_desc.size == 0:
print(' * forcing load of descriptors')
hs.load_features()
cx2_desc = hs.feats.cx2_desc
cx2_kpts = hs.feats.cx2_kpts
def measure_feat_pairs(allres, orgtype='top_true'):
print('Measure ' + orgtype + ' pairs')
orgres = allres.__dict__[orgtype]
entropy_list = []
scale_list = []
score_list = []
lbl = 'Measuring ' + orgtype + ' pair '
fmt_str = util.make_progress_fmt_str(len(orgres), lbl)
rank_skips = []
gt_skips = []
for ix, (qcx, cx, score, rank) in enumerate(orgres.iter()):
util.print_(fmt_str % (ix + 1,))
# Skip low ranks
if rank > 5:
rank_skips.append(qcx)
continue
other_cxs = hs.get_other_indexed_cxs(qcx)
# Skip no groundtruth
if len(other_cxs) == 0:
gt_skips.append(qcx)
continue
res = qcx2_res[qcx]
# Get matching feature indexes
fm = res.cx2_fm[cx]
# Get their scores
fs = res.cx2_fs[cx]
# Get matching descriptors
printDBG('\nfm.shape=%r' % (fm.shape,))
desc1 = cx2_desc[qcx][fm[:, 0]]
desc2 = cx2_desc[cx][fm[:, 1]]
# Get matching keypoints
kpts1 = cx2_kpts[qcx][fm[:, 0]]
kpts2 = cx2_kpts[cx][fm[:, 1]]
# Get their scale
scale1_m = sv2.keypoint_scale(kpts1)
scale2_m = sv2.keypoint_scale(kpts2)
# Get their entropy
entropy1 = descriptor_entropy(desc1, bw_factor=1)
entropy2 = descriptor_entropy(desc2, bw_factor=1)
# Append to results
entropy_tup = np.array(zip(entropy1, entropy2))
scale_tup = np.array(zip(scale1_m, scale2_m))
entropy_tup = entropy_tup.reshape(len(entropy_tup), 2)
scale_tup = scale_tup.reshape(len(scale_tup), 2)
entropy_list.append(entropy_tup)
scale_list.append(scale_tup)
score_list.append(fs)
print('Skipped %d total.' % (len(rank_skips) + len(gt_skips),))
print('Skipped %d for rank > 5, %d for no gt' % (len(rank_skips), len(gt_skips),))
print(np.unique(map(len, entropy_list)))
def evstack(tup):
return np.vstack(tup) if len(tup) > 0 else np.empty((0, 2))
def ehstack(tup):
return np.hstack(tup) if len(tup) > 0 else np.empty((0, 2))
entropy_pairs = evstack(entropy_list)
scale_pairs = evstack(scale_list)
scores = ehstack(score_list)
print('\n * Measured %d pairs' % len(entropy_pairs))
return entropy_pairs, scale_pairs, scores
tt_entropy, tt_scale, tt_scores = measure_feat_pairs(allres, 'top_true')
tf_entropy, tf_scale, tf_scores = measure_feat_pairs(allres, 'top_false')
# Measure ratios
def measure_ratio(arr):
return arr[:, 0] / arr[:, 1] if len(arr) > 0 else np.array([])
tt_entropy_ratio = measure_ratio(tt_entropy)
tf_entropy_ratio = measure_ratio(tf_entropy)
tt_scale_ratio = measure_ratio(tt_scale)
tf_scale_ratio = measure_ratio(tf_scale)
title_suffix = allres.title_suffix
# Entropy vs Score
df2.figure(fnum=1, docla=True)
df2.figure(fnum=1, plotnum=(2, 2, 1))
df2.plot2(tt_entropy[:, 0], tt_scores, 'gx', 'entropy1', 'score', 'Top True')
df2.figure(fnum=1, plotnum=(2, 2, 2))
df2.plot2(tf_entropy[:, 0], tf_scores, 'rx', 'entropy1', 'score', 'Top False')
df2.figure(fnum=1, plotnum=(2, 2, 3))
df2.plot2(tt_entropy[:, 1], tt_scores, 'gx', 'entropy2', 'score', 'Top True')
df2.figure(fnum=1, plotnum=(2, 2, 4))
df2.plot2(tf_entropy[:, 1], tf_scores, 'rx', 'entropy2', 'score', 'Top False')
df2.set_figtitle('Entropy vs Score -- ' + title_suffix)
viz.__dump_or_browse(allres, 'pair_analysis')
# Scale vs Score
df2.figure(fnum=2, plotnum=(2, 2, 1), docla=True)
df2.plot2(tt_scale[:, 0], tt_scores, 'gx', 'scale1', 'score', 'Top True')
df2.figure(fnum=2, plotnum=(2, 2, 2))
df2.plot2(tf_scale[:, 0], tf_scores, 'rx', 'scale1', 'score', 'Top False')
df2.figure(fnum=2, plotnum=(2, 2, 3))
df2.plot2(tt_scale[:, 1], tt_scores, 'gx', 'scale2', 'score', 'Top True')
df2.figure(fnum=2, plotnum=(2, 2, 4))
df2.plot2(tf_scale[:, 1], tf_scores, 'rx', 'scale2', 'score', 'Top False')
df2.set_figtitle('Scale vs Score -- ' + title_suffix)
viz.__dump_or_browse(allres, 'pair_analysis')
# Entropy Ratio vs Score
df2.figure(fnum=3, plotnum=(1, 2, 1), docla=True)
df2.plot2(tt_entropy_ratio, tt_scores, 'gx', 'entropy-ratio', 'score', 'Top True')
df2.figure(fnum=3, plotnum=(1, 2, 2))
df2.plot2(tf_entropy_ratio, tf_scores, 'rx', 'entropy-ratio', 'score', 'Top False')
df2.set_figtitle('Entropy Ratio vs Score -- ' + title_suffix)
viz.__dump_or_browse(allres, 'pair_analysis')
# Scale Ratio vs Score
df2.figure(fnum=4, plotnum=(1, 2, 1), docla=True)
df2.plot2(tt_scale_ratio, tt_scores, 'gx', 'scale-ratio', 'score', 'Top True')
df2.figure(fnum=4, plotnum=(1, 2, 2))
df2.plot2(tf_scale_ratio, tf_scores, 'rx', 'scale-ratio', 'score', 'Top False')
df2.set_figtitle('Entropy Ratio vs Score -- ' + title_suffix)
viz.__dump_or_browse(allres, 'pair_analysis')
#df2.rrr(); viz.rrr(); clf(); df2.show_chip(hs, 14, allres=allres)
#viz.show_chip(allres, 14, 'top5')
#viz.show_chip(allres, 14, 'gt_matches')
#df2.show_chip(hs, 1, allres=allres)
def possible_problems():
# Perhaps overlapping keypoints are causing more harm than good.
# Maybe there is a way of grouping them or averaging them into a
# better descriptor.
pass
#===============================
# MAIN SCRIPT
#===============================
def report_all(hs, qcx2_res, qcx_list, **kwargs):
allres = init_allres(hs, qcx2_res, qcx_list=qcx_list, **kwargs)
#if not 'kwargs' in vars():
#kwargs = dict(rankres=True, stem=False, matrix=False, pdf=False,
#hist=False, oxford=False, ttbttf=False, problems=False,
#gtmatches=False)
try:
dump_all(allres, **kwargs)
except Exception as ex:
import traceback
print('\n\n-----------------')
print('report_all(hs, qcx2_res, **kwargs=%r' % (kwargs))
print('Caught Error in rr2.dump_all')
print(repr(ex))
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** print_tb:")
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print("*** print_exception:")
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
print('Caught Error in rr2.dump_all')
print('-----------------\n')
raise
return allres, ex
return allres
def read_until(file, target):
    current_line = file.readline()
    while current_line:
        if current_line.find(target) > -1:
            return current_line
        current_line = file.readline()
def print_result_summaries_list(topnum=5):
print('\n<(^_^<)\n')
# Print out some summary of all results you have
hs = ld2.HotSpotter()
hs.load_tables(ld2.DEFAULT)
result_file_list = os.listdir(hs.dirs.result_dir)
sorted_rankres = []
for result_fname in iter(result_file_list):
if fnmatch.fnmatch(result_fname, 'rankres_str*.csv'):
print(result_fname)
with open(join(hs.dirs.result_dir, result_fname), 'r') as file:
metaline = file.readline()
toprint = metaline
# skip 4 metalines
[file.readline() for _ in xrange(4)]
top5line = file.readline()
top1line = file.readline()
toprint += top5line + top1line
line = read_until(file, '# NumData')
num_data = int(line.replace('# NumData', ''))
file.readline() # header
res_data_lines = [file.readline() for _ in xrange(num_data)]
res_data_str = np.array([line.split(',') for line in res_data_lines])
tt_scores = np.array(res_data_str[:, 5], dtype=np.float)
bt_scores = np.array(res_data_str[:, 6], dtype=np.float)
tf_scores = np.array(res_data_str[:, 7], dtype=np.float)
tt_score_sum = sum([score for score in tt_scores if score > 0])
bt_score_sum = sum([score for score in bt_scores if score > 0])
tf_score_sum = sum([score for score in tf_scores if score > 0])
toprint += ('tt_scores = %r; ' % tt_score_sum)
toprint += ('bt_scores = %r; ' % bt_score_sum)
toprint += ('tf_scores = %r; ' % tf_score_sum)
if topnum == 5:
sorted_rankres.append(top5line + metaline)
else:
sorted_rankres.append(top1line + metaline)
print(toprint + '\n')
print('\n(>^_^)>\n')
sorted_mapscore = []
for result_fname in iter(result_file_list):
if fnmatch.fnmatch(result_fname, 'oxsty_map_csv*.csv'):
print(result_fname)
with open(join(hs.dirs.result_dir, result_fname), 'r') as file:
metaline = file.readline()
scoreline = file.readline()
toprint = metaline + scoreline
sorted_mapscore.append(scoreline + metaline)
print(toprint)
print('\n'.join(sorted(sorted_rankres)))
print('\n'.join(sorted(sorted_mapscore)))
print('\n^(^_^)^\n')
def _get_orgres2_distances(allres, orgres_list=None):
if orgres_list is None:
#orgres_list = ['true', 'false', 'top_true', 'bot_true', 'top_false']
orgres_list = ['true', 'false']
#print(allres)
dist_fn = lambda orgres: get_orgres_match_distances(allres, orgres)
orgres2_distance = {}
for orgres in orgres_list:
try:
orgres2_distance[orgres] = dist_fn(orgres)
except Exception as ex:
print(ex)
print('failed dist orgres=%r' % orgres)
return orgres2_distance
@profile
def get_orgres_match_distances(allres, orgtype_='false'):
import algos
qcxs = allres[orgtype_].qcxs
cxs = allres[orgtype_].cxs
match_list = zip(qcxs, cxs)
printDBG('[rr2] getting orgtype_=%r distances between sifts' % orgtype_)
adesc1, adesc2 = get_matching_descriptors(allres, match_list)
printDBG('[rr2] * adesc1.shape = %r' % (adesc1.shape,))
printDBG('[rr2] * adesc2.shape = %r' % (adesc2.shape,))
#dist_list = ['L1', 'L2', 'hist_isect', 'emd']
#dist_list = ['L1', 'L2', 'hist_isect']
dist_list = ['L2', 'hist_isect']
hist1 = np.array(adesc1, dtype=np.float64)
hist2 = np.array(adesc2, dtype=np.float64)
distances = algos.compute_distances(hist1, hist2, dist_list)
return distances
def get_matching_descriptors(allres, match_list):
hs = allres.hs
qcx2_res = allres.qcx2_res
# FIXME: More intelligent feature loading
if len(hs.feats.cx2_desc) == 0:
hs.refresh_features()
cx2_desc = hs.feats.cx2_desc
desc1_list = []
desc2_list = []
desc1_append = desc1_list.append
desc2_append = desc2_list.append
for qcx, cx in match_list:
fx2_desc1 = cx2_desc[qcx]
fx2_desc2 = cx2_desc[cx]
res = qcx2_res[qcx]
fm = res.cx2_fm[cx]
#fs = res.cx2_fs[cx]
if len(fm) == 0:
continue
fx1_list = fm.T[0]
fx2_list = fm.T[1]
desc1 = fx2_desc1[fx1_list]
desc2 = fx2_desc2[fx2_list]
desc1_append(desc1)
desc2_append(desc2)
aggdesc1 = np.vstack(desc1_list)
aggdesc2 = np.vstack(desc2_list)
return aggdesc1, aggdesc2
def load_qcx2_res(hs, qcx_list, nocache=False):
    'Performs / loads all queries'
import match_chips3 as mc3
qreq = mc3.quickly_ensure_qreq(hs, qcxs=qcx_list)
# Build query big cache uid
query_uid = qreq.get_uid()
hs_uid = hs.get_db_name()
qcxs_uid = util.hashstr_arr(qcx_list, lbl='_qcxs')
qres_uid = hs_uid + query_uid + qcxs_uid
cache_dir = join(hs.dirs.cache_dir, 'query_results_bigcache')
print('[rr2] load_qcx2_res(): %r' % qres_uid)
io_kwargs = dict(dpath=cache_dir, fname='query_results', uid=qres_uid, ext='.cPkl')
# Return cache if available
if not params.args.nocache_query and (not nocache):
qcx2_res = io.smart_load(**io_kwargs)
if qcx2_res is not None:
print('[rr2] * cache hit')
return qcx2_res
print('[rr2] * cache miss')
else:
print('[rr2] * cache off')
# Individually load / compute queries
if isinstance(qcx_list, list):
qcx_set = set(qcx_list)
else:
qcx_set = set(qcx_list.tolist())
qcx_max = max(qcx_list) + 1
qcx2_res = [hs.query(qcx) if qcx in qcx_set else None for qcx in xrange(qcx_max)]
# Save to the cache
print('[rr2] Saving query_results to bigcache: %r' % qres_uid)
util.ensuredir(cache_dir)
io.smart_save(qcx2_res, **io_kwargs)
return qcx2_res
def get_allres(hs, qcx_list):
'Performs / Loads all queries and build allres structure'
print('[rr2] get_allres()')
#valid_cxs = hs.get_valid_cxs()
qcx2_res = load_qcx2_res(hs, qcx_list)
allres = init_allres(hs, qcx2_res, qcx_list=qcx_list)
return allres
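# Typical driver usage for this module (a sketch; the HotSpotter setup mirrors
# print_result_summaries_list above and may need extra loading steps):
#     hs = ld2.HotSpotter()
#     hs.load_tables(ld2.DEFAULT)
#     qcx_list = hs.get_valid_cxs()
#     allres = get_allres(hs, qcx_list)
#     dump_all(allres)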
|
apache-2.0
|
nmartensen/pandas
|
asv_bench/benchmarks/io_sql.py
|
7
|
4120
|
import sqlalchemy
from .pandas_vb_common import *
import sqlite3
from sqlalchemy import create_engine
#-------------------------------------------------------------------------------
# to_sql
class WriteSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
def time_fallback(self):
self.df.to_sql('test1', self.con, if_exists='replace')
def time_sqlalchemy(self):
self.df.to_sql('test1', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# read_sql
class ReadSQL(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.index = tm.makeStringIndex(10000)
self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index)
self.df.to_sql('test2', self.engine, if_exists='replace')
self.df.to_sql('test2', self.con, if_exists='replace')
def time_read_query_fallback(self):
read_sql_query('SELECT * FROM test2', self.con)
def time_read_query_sqlalchemy(self):
read_sql_query('SELECT * FROM test2', self.engine)
def time_read_table_sqlalchemy(self):
read_sql_table('test2', self.engine)
#-------------------------------------------------------------------------------
# type specific write
class WriteSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'string': (['foo'] * 10000), 'bool': ([True] * 10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df.loc[1000:3000, 'float'] = np.nan
def time_string_fallback(self):
self.df[['string']].to_sql('test_string', self.con, if_exists='replace')
def time_string_sqlalchemy(self):
self.df[['string']].to_sql('test_string', self.engine, if_exists='replace')
def time_float_fallback(self):
self.df[['float']].to_sql('test_float', self.con, if_exists='replace')
def time_float_sqlalchemy(self):
self.df[['float']].to_sql('test_float', self.engine, if_exists='replace')
def time_datetime_sqlalchemy(self):
self.df[['datetime']].to_sql('test_datetime', self.engine, if_exists='replace')
#-------------------------------------------------------------------------------
# type specific read
class ReadSQLTypes(object):
goal_time = 0.2
def setup(self):
self.engine = create_engine('sqlite:///:memory:')
self.con = sqlite3.connect(':memory:')
self.df = DataFrame({'float': randn(10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), })
self.df['datetime_string'] = self.df['datetime'].map(str)
self.df.to_sql('test_type', self.engine, if_exists='replace')
self.df[['float', 'datetime_string']].to_sql('test_type', self.con, if_exists='replace')
def time_datetime_read_and_parse_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime_string'], parse_dates=['datetime_string'])
def time_datetime_read_as_native_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['datetime'])
def time_float_read_query_fallback(self):
read_sql_query('SELECT float FROM test_type', self.con)
def time_float_read_query_sqlalchemy(self):
read_sql_query('SELECT float FROM test_type', self.engine)
def time_float_read_table_sqlalchemy(self):
read_sql_table('test_type', self.engine, columns=['float'])
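# These classes follow the airspeed velocity (asv) benchmark convention: asv
# runs setup() before timing each time_* method, with goal_time as the
# per-benchmark timing budget.  A quick ad-hoc check outside asv (a sketch):
#     bench = WriteSQL()
#     bench.setup()
#     bench.time_sqlalchemy()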
|
bsd-3-clause
|
LohithBlaze/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
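# ----------------------------------------------------------------------------
# Editor's sketch (illustrative only, not part of the original example): the
# loop below spells out what GridSearchCV automates above -- enumerate every
# candidate in the parameter grid and cross-validate it on the development
# set. It assumes the same 0.17-era sklearn modules already imported in this
# example (sklearn.grid_search, sklearn.cross_validation).
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import ParameterGrid
manual_results = []
for params in ParameterGrid(tuned_parameters):
    candidate = SVC(**params)
    cv_scores = cross_val_score(candidate, X_train, y_train, cv=5)
    manual_results.append((cv_scores.mean(), params))
best_mean, best_params = max(manual_results, key=lambda r: r[0])
print("Best manually cross-validated candidate: %r (mean CV accuracy %.3f)"
      % (best_params, best_mean))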
|
bsd-3-clause
|
hugobowne/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
24
|
39507
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg', 'sag']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver='liblinear', multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=2000)
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
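# Editor's sketch (illustrative only, not part of scikit-learn's test suite):
# the directional finite-difference trick used in test_logistic_grad_hess
# above, demonstrated on a quadratic f(w) = 0.5 * w.T.dot(A).dot(w), whose
# Hessian is exactly A. The gradient is sampled along a direction `vec` and a
# least-squares fit of the centered gradients against the step sizes recovers
# the Hessian-vector product A.dot(vec). Helper names are the editor's own.
def _estimate_hessian_column(grad_func, w, vec, e=1e-3, n_steps=30):
    d_x = np.linspace(-e, e, n_steps)
    d_grad = np.array([grad_func(w + t * vec) for t in d_x])
    d_grad -= d_grad.mean(axis=0)
    return linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
def _check_directional_hessian_sketch():
    rng = np.random.RandomState(1)
    A = rng.randn(4, 4)
    A = A.dot(A.T)  # symmetric, so A is the Hessian of 0.5 * w.A.w
    vec = np.zeros(4)
    vec[1] = 1.0
    approx = _estimate_hessian_column(lambda w: A.dot(w), np.zeros(4), vec)
    assert_array_almost_equal(approx, A.dot(vec), decimal=6)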
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit the intercept. This can be considered equivalent to adding
# a constant feature of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# The cv indices from stratified kfold (where stratification is done based
# on the fine-grained iris classes, i.e., before the classes 0 and 1 are
# conflated) are used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds)
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class 2 is the same regardless of whether
# classes 0 and 1 are separated or not
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg', 'sag']:
max_iter = 100 if solver == 'sag' else 15
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=max_iter,
random_state=42, tol=1e-2, cv=2)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
"multiclass with class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set class_weight='balanced'")
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ['lbfgs', 'liblinear']:
clf_sw_none = LR(solver=solver, fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver=solver, fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(
clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(
clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as {0: 1, 1: 2} is the same as
# passing class_weight = {0: 1, 1: 1} but adjusting sample weights
# to be 2 for all instances of class 1
for solver in ['lbfgs', 'liblinear']:
clf_cw_12 = LR(solver=solver, fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(
clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
# Test the above for l1 penalty and l2 penalty with dual=True,
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l1")
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l1")
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
penalty="l2", dual=True)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes, y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
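# Editor's sketch (illustrative only): to the best of the editor's knowledge,
# the "balanced" heuristic exercised in the class-weight tests assigns
#     weight_c = n_samples / (n_classes * count_c),
# so rarer classes get proportionally larger weights. The helper name below
# is the editor's own and is not part of scikit-learn.
def _naive_balanced_class_weights(y):
    y = np.asarray(y)
    classes = np.unique(y)
    counts = np.array([np.sum(y == c) for c in classes], dtype=float)
    weights = len(y) / (len(classes) * counts)
    return dict(zip(classes, weights))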
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight="balanced")
clf2 = LogisticRegression(solver=solver, multi_class="ovr",
class_weight=class_weight_dict)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_multinomial_logistic_regression_with_classweight_auto():
X, y = iris.data, iris.target
model = LogisticRegression(multi_class='multinomial',
class_weight='auto', solver='lbfgs')
# 'auto' is deprecated and will be removed in 0.19
assert_warns_message(DeprecationWarning,
"class_weight='auto' heuristic is deprecated",
model.fit, X, y)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
# 'lbfgs' is used as the reference
solver = 'lbfgs'
ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
for solver in ['sag', 'newton-cg']:
clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6)
clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
random_state=42, max_iter=1000, tol=1e-6,
fit_intercept=False)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and the other solvers
assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg', 'sag']:
clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilities using the multinomial (cross-entropy) loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
# Predicted probabilities using the softmax function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'liblinear' and multi_class == 'multinomial':
continue
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
multi_class=multi_class,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
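# Editor's sketch (illustrative only, not part of scikit-learn): a naive,
# NumPy-only restatement of the penalized binary logistic loss that the
# _logistic_loss tests above exercise, for labels y_i in {-1, +1}:
#     L(w) = sum_i log(1 + exp(-y_i * x_i.w)) + 0.5 * alpha * ||w||^2.
# It is written for readability rather than numerical stability (no intercept
# handling, no log-sum-exp trick). The helper name is the editor's own.
def _naive_penalized_logistic_loss(w, X, y, alpha):
    yz = np.asarray(y) * np.dot(X, w)
    return np.sum(np.log(1.0 + np.exp(-yz))) + 0.5 * alpha * np.dot(w, w)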
|
bsd-3-clause
|
toobaz/pandas
|
pandas/tests/indexes/interval/test_interval.py
|
1
|
41008
|
from itertools import permutations
import re
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
Interval,
IntervalIndex,
Timedelta,
Timestamp,
date_range,
interval_range,
isna,
notna,
timedelta_range,
)
import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@pytest.fixture(scope="class", params=[None, "foo"])
def name(request):
return request.param
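# Editor's note (assumption): the `closed` argument taken by many tests below
# is not defined in this module; it is presumably a shared pytest fixture from
# the package-level conftest, parametrized over "left", "right", "both" and
# "neither".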
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples([(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed="right"):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed="right"):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan),
closed=closed,
)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10,)
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10,)
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [
Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)
]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
@pytest.mark.parametrize(
"breaks",
[
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(["20170101", "20170202", "20170303", "20170404"]),
pd.to_timedelta(["1ns", "2ms", "3s", "4M", "5H", "6D"]),
],
)
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert index.hasnans is False
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans is True
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(
index.left.values, result.left.values, check_same="same"
)
tm.assert_numpy_array_equal(
index.right.values, result.right.values, check_same="same"
)
# by definition, make a copy
result = IntervalIndex(index._ndarray_values, copy=False)
tm.assert_numpy_array_equal(
index.left.values, result.left.values, check_same="copy"
)
tm.assert_numpy_array_equal(
index.right.values, result.right.values, check_same="copy"
)
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range("20130101", periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name="foo"
)
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name="bar"
)
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {"left", "right", "both", "neither"} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed
)
assert not expected.equals(expected_other_closed)
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
interval_range(0, periods=10, closed="neither"),
interval_range(1.7, periods=8, freq=2.5, closed="both"),
interval_range(Timestamp("20170101"), periods=12, closed="left"),
interval_range(Timedelta("1 day"), periods=6, closed="right"),
],
)
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = "can only insert Interval objects and NA into an IntervalIndex"
with pytest.raises(ValueError, match=msg):
data.insert(1, "foo")
# invalid closed
msg = "inserted item must be closed on the same side as the index"
for closed in {"left", "right", "both", "neither"} - {item.closed}:
with pytest.raises(ValueError, match=msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_is_unique_interval(self, closed):
"""
Interval specific tests for is_unique in addition to base class tests
"""
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason="not a valid repr as we use interval notation")
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed="right")
expected = (
"IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')"
)
assert repr(i) == expected
i = IntervalIndex.from_tuples(
(Timestamp("20130101"), Timestamp("20130102")),
(Timestamp("20130102"), Timestamp("20130103")),
closed="right",
)
expected = (
"IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')"
)
assert repr(i) == expected
@pytest.mark.skip(reason="not a valid repr as we use interval notation")
def test_repr_max_seq_item_setting(self):
super().test_repr_max_seq_item_setting()
@pytest.mark.skip(reason="not a valid repr as we use interval notation")
def test_repr_roundtrip(self):
super().test_repr_roundtrip()
def test_frame_repr(self):
# https://github.com/pandas-dev/pandas/pull/24134/files
df = pd.DataFrame(
{"A": [1, 2, 3, 4]}, index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4])
)
result = repr(df)
expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4"
assert result == expected
@pytest.mark.parametrize(
"constructor,expected",
[
(
pd.Series,
(
"(0.0, 1.0] a\n"
"NaN b\n"
"(2.0, 3.0] c\n"
"dtype: object"
),
),
(
pd.DataFrame,
(" 0\n(0.0, 1.0] a\nNaN b\n(2.0, 3.0] c"),
),
],
)
def test_repr_missing(self, constructor, expected):
# GH 25984
index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
obj = constructor(list("abc"), index=index)
result = repr(obj)
assert result == expected
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.0,), (1.0,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays(
(1.0, np.nan), (2.0, np.nan), closed=closed
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("scalar", [-1, 0, 0.5, 3, 4.5, 5, 6])
def test_get_loc_length_one_scalar(self, scalar, closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
if scalar in index[0]:
result = index.get_loc(scalar)
assert result == 0
else:
with pytest.raises(KeyError, match=str(scalar)):
index.get_loc(scalar)
@pytest.mark.parametrize("other_closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("left, right", [(0, 5), (-1, 4), (-1, 6), (6, 7)])
def test_get_loc_length_one_interval(self, left, right, closed, other_closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
interval = Interval(left, right, closed=other_closed)
if interval == index[0]:
result = index.get_loc(interval)
assert result == 0
else:
with pytest.raises(
KeyError,
match=re.escape(
"Interval({left}, {right}, closed='{other_closed}')".format(
left=left, right=right, other_closed=other_closed
)
),
):
index.get_loc(interval)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize(
"breaks",
[
date_range("20180101", periods=4),
date_range("20180101", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
],
ids=lambda x: str(x.dtype),
)
def test_get_loc_datetimelike_nonoverlapping(self, breaks):
# GH 20636
# nonoverlapping = IntervalIndex method and no i8 conversion
index = IntervalIndex.from_breaks(breaks)
value = index[0].mid
result = index.get_loc(value)
expected = 0
assert result == expected
interval = Interval(index[0].left, index[0].right)
result = index.get_loc(interval)
expected = 0
assert result == expected
@pytest.mark.parametrize(
"arrays",
[
(date_range("20180101", periods=4), date_range("20180103", periods=4)),
(
date_range("20180101", periods=4, tz="US/Eastern"),
date_range("20180103", periods=4, tz="US/Eastern"),
),
(
timedelta_range("0 days", periods=4),
timedelta_range("2 days", periods=4),
),
],
ids=lambda x: str(x[0].dtype),
)
def test_get_loc_datetimelike_overlapping(self, arrays):
# GH 20636
index = IntervalIndex.from_arrays(*arrays)
value = index[0].mid + Timedelta("12 hours")
result = index.get_loc(value)
expected = slice(0, 2, None)
assert result == expected
interval = Interval(index[0].left, index[0].right)
result = index.get_loc(interval)
expected = 0
assert result == expected
@pytest.mark.parametrize(
"values",
[
date_range("2018-01-04", periods=4, freq="-1D"),
date_range("2018-01-04", periods=4, freq="-1D", tz="US/Eastern"),
timedelta_range("3 days", periods=4, freq="-1D"),
np.arange(3.0, -1.0, -1.0),
np.arange(3, -1, -1),
],
ids=lambda x: str(x.dtype),
)
def test_get_loc_decreasing(self, values):
# GH 25860
index = IntervalIndex.from_arrays(values[1:], values[:-1])
result = index.get_loc(index[0])
expected = 0
assert result == expected
@pytest.mark.parametrize("item", [[3], np.arange(0.5, 5, 0.5)])
def test_get_indexer_length_one(self, item, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer(item)
expected = np.array([0] * len(item), dtype="intp")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("size", [1, 5])
def test_get_indexer_length_one_interval(self, size, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer([Interval(0, 5, closed)] * size)
expected = np.array([0] * size, dtype="intp")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"breaks",
[
date_range("20180101", periods=4),
date_range("20180101", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
],
ids=lambda x: str(x.dtype),
)
def test_maybe_convert_i8(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
# intervalindex
result = index._maybe_convert_i8(index)
expected = IntervalIndex.from_breaks(breaks.asi8)
tm.assert_index_equal(result, expected)
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
expected = Interval(breaks[0].value, breaks[1].value)
assert result == expected
# datetimelike index
result = index._maybe_convert_i8(breaks)
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
expected = breaks[0].value
assert result == expected
# list-like of datetimelike scalars
result = index._maybe_convert_i8(list(breaks))
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"breaks",
[date_range("2018-01-01", periods=5), timedelta_range("0 days", periods=5)],
)
def test_maybe_convert_i8_nat(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
to_convert = breaks._constructor([pd.NaT] * 3)
expected = pd.Float64Index([np.nan] * 3)
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
to_convert = to_convert.insert(0, breaks[0])
expected = expected.insert(0, float(breaks[0].value))
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"breaks",
[np.arange(5, dtype="int64"), np.arange(5, dtype="float64")],
ids=lambda x: str(x.dtype),
)
@pytest.mark.parametrize(
"make_key",
[
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list,
],
ids=["IntervalIndex", "Interval", "Index", "scalar", "list"],
)
def test_maybe_convert_i8_numeric(self, breaks, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
key = make_key(breaks)
# no conversion occurs for numeric
result = index._maybe_convert_i8(key)
assert result is key
@pytest.mark.parametrize(
"breaks1, breaks2",
permutations(
[
date_range("20180101", periods=4),
date_range("20180101", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
],
2,
),
ids=lambda x: str(x.dtype),
)
@pytest.mark.parametrize(
"make_key",
[
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list,
],
ids=["IntervalIndex", "Interval", "Index", "scalar", "list"],
)
def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks1)
key = make_key(breaks2)
msg = (
"Cannot index an IntervalIndex of subtype {dtype1} with "
"values of dtype {dtype2}"
)
msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
with pytest.raises(ValueError, match=msg):
index._maybe_convert_i8(key)
def test_contains_method(self):
# .contains(value) returns a boolean mask of the intervals that contain the value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
expected = np.array([False, False], dtype="bool")
actual = i.contains(0)
tm.assert_numpy_array_equal(actual, expected)
actual = i.contains(3)
tm.assert_numpy_array_equal(actual, expected)
expected = np.array([True, False], dtype="bool")
actual = i.contains(0.5)
tm.assert_numpy_array_equal(actual, expected)
actual = i.contains(1)
tm.assert_numpy_array_equal(actual, expected)
# __contains__ is not implemented for "interval in interval"; follow
# that for the contains method for now
with pytest.raises(
NotImplementedError, match="contains not implemented for two"
):
i.contains(Interval(0, 1))
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype="intp")
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {"right", "left", "both", "neither"}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], "left")
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with pytest.raises(TypeError, match="unorderable types"):
self.index > 0
with pytest.raises(TypeError, match="unorderable types"):
self.index <= 0
msg = r"unorderable types: Interval\(\) > int\(\)"
with pytest.raises(TypeError, match=msg):
self.index > np.arange(2)
msg = "Lengths must match to compare"
with pytest.raises(ValueError, match=msg):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index(
[np.nan, Interval(0, 1, closed=closed), Interval(1, 2, closed=closed)]
)
idx2 = IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
msg = (
"missing values must be missing in the same location both left"
" and right sides"
)
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed
)
tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_datetime(self, tz):
start = Timestamp("2000-01-01", tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp("2000-01-01T12:00", tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp("2000-01-01", tz=tz) not in index
assert Timestamp("2000-01-01T12", tz=tz) not in index
assert Timestamp("2000-01-02", tz=tz) not in index
iv_true = Interval(
Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz)
)
iv_false = Interval(
Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz)
)
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp("2000-01-01", tz=tz)).any()
assert index.contains(Timestamp("2000-01-01T12", tz=tz)).any()
assert index.contains(Timestamp("2000-01-02", tz=tz)).any()
# test get_indexer
start = Timestamp("1999-12-31T12:00", tz=tz)
target = date_range(start=start, periods=7, freq="12H")
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype="intp")
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp("2000-01-08T18:00", tz=tz)
target = date_range(start=start, periods=7, freq="6H")
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype="intp")
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed
)
tm.assert_index_equal(result, expected)
msg = (
"can only append two IntervalIndex objects that are closed "
"on the same side"
)
for other_closed in {"left", "right", "both", "neither"} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed
)
with pytest.raises(ValueError, match=msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == "both":
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize(
"start, shift, na_value",
[
(0, 1, np.nan),
(Timestamp("2018-01-01"), Timedelta("1 day"), pd.NaT),
(Timedelta("0 days"), Timedelta("1 day"), pd.NaT),
],
)
def test_is_overlapping(self, start, shift, na_value, closed):
# GH 23309
# see test_interval_tree.py for extensive tests; interface tests here
# non-overlapping
tuples = [(start + n * shift, start + (n + 1) * shift) for n in (0, 2, 4)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# non-overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# overlapping
tuples = [(start + n * shift, start + (n + 2) * shift) for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# common endpoints
tuples = [(start + n * shift, start + (n + 1) * shift) for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
expected = closed == "both"
assert result is expected
# common endpoints with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
assert result is expected
@pytest.mark.parametrize(
"tuples",
[
list(zip(range(10), range(1, 11))),
list(
zip(
date_range("20170101", periods=10),
date_range("20170101", periods=10),
)
),
list(
zip(
timedelta_range("0 days", periods=10),
timedelta_range("1 day", periods=10),
)
),
],
)
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(com.asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"tuples",
[
list(zip(range(10), range(1, 11))) + [np.nan],
list(
zip(
date_range("20170101", periods=10),
date_range("20170101", periods=10),
)
)
+ [np.nan],
list(
zip(
timedelta_range("0 days", periods=10),
timedelta_range("1 day", periods=10),
)
)
+ [np.nan],
],
)
@pytest.mark.parametrize("na_tuple", [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(com.asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
def test_nbytes(self):
# GH 19209
left = np.arange(0, 4, dtype="i8")
right = np.arange(1, 5, dtype="i8")
result = IntervalIndex.from_arrays(left, right).nbytes
expected = 64 # 4 * 8 * 2
assert result == expected
def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype="i8")
right = np.arange(1, 5, dtype="i8")
expected = 16 # 8 * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = IntervalIndex.from_arrays(left, right).itemsize
assert result == expected
@pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
index = interval_range(0, 5, closed=closed, name=name)
result = index.set_closed(new_closed)
expected = interval_range(0, 5, closed=new_closed, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("bad_closed", ["foo", 10, "LEFT", True, False])
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
with pytest.raises(ValueError, match=msg):
index.set_closed(bad_closed)
def test_is_all_dates(self):
# GH 23576
year_2017 = pd.Interval(
pd.Timestamp("2017-01-01 00:00:00"), pd.Timestamp("2018-01-01 00:00:00")
)
year_2017_index = pd.IntervalIndex([year_2017])
assert not year_2017_index.is_all_dates
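# --- Illustrative addition, not part of the original test module ---
# A minimal, self-contained sketch of the IntervalIndex semantics exercised
# above: `.contains()` checks point membership elementwise, while intervals
# that merely share an endpoint only count as overlapping for closed='both'.
def _demo_intervalindex_semantics():
    import pandas as pd

    idx = pd.IntervalIndex.from_breaks([0, 1, 2], closed="right")  # (0, 1], (1, 2]

    # point membership is elementwise via .contains()
    assert idx.contains(0.5).tolist() == [True, False]
    assert idx.contains(1).tolist() == [True, False]  # 1 lies in (0, 1], not (1, 2]

    # intervals sharing only an endpoint overlap only when closed on both sides
    shared = pd.IntervalIndex.from_tuples([(0, 1), (1, 2)], closed="both")
    assert shared.is_overlapping
    assert not idx.is_overlapping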
|
bsd-3-clause
|
jmbeuken/abinit
|
tests/Scripts/abimem.py
|
1
|
7562
|
#!/usr/bin/env python
"""
This script analyzes the `abimem_rank.mocc` files produced by Abinit when
the code is executed in memory-profiling mode.
"""
from __future__ import print_function, division, unicode_literals
__version__ = "0.1.0"
__author__ = "Matteo Giantomassi"
import sys
import os
import re
try:
import argparse
except ImportError:
print("abimem.py requires argparse module and python >= 2.7")
raise
import logging
logger = logging.getLogger(__name__)
# We don't install with setup.py hence we have to add the directory [...]/abinit/tests to $PYTHONPATH
# TODO: Use Absolute imports and rename tests --> abitests to
# avoid possible conflicts with the packages in PYTHONPATH
# monty installs the subpackage paths and this breaks the import below
pack_dir, x = os.path.split(os.path.abspath(__file__))
pack_dir, x = os.path.split(pack_dir)
sys.path.insert(0,pack_dir)
pack_dir, x = os.path.split(pack_dir)
sys.path.insert(0,pack_dir)
try:
from tests.pymods.memprof import AbimemParser
except ImportError:
print("Cannot find tests.pymods.memprof module in python path")
    print("Very likely, you have copied the scripts from ~abinit/tests/Scripts to another directory")
print("In this case, you have to add the abinit directory to the python path by executing:\n")
print(" export PYTHONPATH=~/abinit_directory\n")
print("before running the script.")
print("Note that there's no need to change PYTHONPATH if you invoke the script as ~abinit/tests/Scripts/abimem.py")
raise
def leaks(options, parsers):
"""Find possible memory leaks"""
retcode = 0
for parser in parsers:
parser.find_memleaks()
return retcode
def small(options, parsers):
"""Find small allocations."""
retcode = 0
for parser in parsers:
smalls = parser.find_small_allocs() #nbytes=options.nbytes)
return retcode
def intens(options, parsers):
"""Find routines with intensive allocations."""
retcode = 0
for parser in parsers:
intensive = parser.find_intensive() #threshold=options.threshold)
return retcode
def peaks(options, parsers):
"""Find memory peaks."""
retcode = 0
for parser in parsers:
parser.find_peaks()
return retcode
def weird(options, parsers):
"""Find weird allocations. i.e. pointers <= 0"""
retcode = 0
for parser in parsers:
parser.find_weird_ptrs()
return retcode
def zerosized(options, parsers):
"""Find zero-sized allocations."""
retcode = 0
for parser in parsers:
parser.find_zerosized()
return retcode
def plot(options, parsers):
"""Plot data with matplotlib."""
for parser in parsers:
parser.plot_memory_usage()
return 0
def get_parsers(options):
if not hasattr(options, "paths"):
raise ValueError("paths argument is missing")
if os.path.isfile(options.paths[0]):
return [AbimemParser(path) for path in options.paths]
else:
# Walk directory tree and find abimem files.
top = options.paths[0]
if len(options.paths) != 1:
raise ValueError("Expecting one argument with dirname, got %s" % len(options.paths))
if not os.path.isdir(top):
            raise ValueError("Expecting an existing directory, got %s" % top)
        re_abimem = re.compile(r"^abimem_rank(\d+)\.mocc$")
paths = []
for dirpath, dirnames, filenames in os.walk(top):
for f in filenames:
if not re_abimem.match(f): continue
paths.append(os.path.join(dirpath, f))
options.paths = paths
print("Will analyze %s abimem file(s)" % len(paths))
if not paths: sys.exit(1)
return [AbimemParser(path) for path in paths]
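# --- Illustrative addition, not part of the original script ---
# A minimal sketch of the file-name convention matched by the regex in
# get_parsers: only files named "abimem_rank<N>.mocc" are picked up.
def _demo_abimem_filename_matching():
    pattern = re.compile(r"^abimem_rank(\d+)\.mocc$")
    assert pattern.match("abimem_rank0.mocc") is not None
    assert pattern.match("abimem_rank12.mocc") is not None
    assert pattern.match("abimem_rank0.mocc.bak") is None   # trailing suffix rejected
    assert pattern.match("abimem.mocc") is None             # missing rank number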
def main():
def str_examples():
        examples = """\
usage example:
abimem.py leaks [FILES] => Find possible memory leaks in FILE(s)
abimem.py small [FILES] => Find small memory allocations in FILE(s)
abimem.py intens [FILES] => Find periods of intense memory allocation in FILE(s)
abimem.py peaks [FILES] => Find peaks in memory allocation in FILE(s)
abimem.py plot [FILES] => Plot memory allocations in FILE(s) with matplotlib
    FILES could be either a list of files or a single directory containing abimem_rank.mocc files.
TIP: To profile the python code add `prof` before the command e.g.
abimem.py prof leaks [FILES]
"""
return examples
def show_examples_and_exit(err_msg=None, error_code=1):
"""Display the usage of the script."""
sys.stderr.write(str_examples())
if err_msg: sys.stderr.write("Fatal Error\n" + err_msg + "\n")
sys.exit(error_code)
paths_selector_parser = argparse.ArgumentParser(add_help=False)
paths_selector_parser.add_argument('paths', nargs="+", help="List of files or directory containing abimem files.")
# Build the main parser.
parser = argparse.ArgumentParser(epilog=str_examples(), formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-V', '--version', action='version', version="%(prog)s version " + __version__)
parser.add_argument('--loglevel', default="ERROR", type=str,
help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")
# Create the parsers for the sub-commands
subparsers = parser.add_subparsers(dest='command', help='sub-command help', description="Valid subcommands")
# Build Subparsers for commands
p_leaks = subparsers.add_parser('leaks', parents=[paths_selector_parser], help=leaks.__doc__)
# Subparser for small
p_small = subparsers.add_parser('small', parents=[paths_selector_parser], help=small.__doc__)
# Subparser for intens
p_intensive = subparsers.add_parser('intens', parents=[paths_selector_parser], help=intens.__doc__)
# Subparser for peaks command.
p_peaks = subparsers.add_parser('peaks', parents=[paths_selector_parser], help=peaks.__doc__)
# Subparser for weird command.
p_weird = subparsers.add_parser('weird', parents=[paths_selector_parser], help=weird.__doc__)
# Subparser for zerosized command.
p_zerosized = subparsers.add_parser('zerosized', parents=[paths_selector_parser], help=zerosized.__doc__)
# Subparser for plot command.
p_plot = subparsers.add_parser('plot', parents=[paths_selector_parser], help=plot.__doc__)
# Parse command line.
try:
options = parser.parse_args()
except Exception as exc:
show_examples_and_exit(error_code=1)
# loglevel is bound to the string value obtained from the command line argument.
# Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
numeric_level = getattr(logging, options.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.loglevel)
logging.basicConfig(level=numeric_level)
parsers = get_parsers(options)
# Dispatch
return globals()[options.command](options, parsers)
if __name__ == "__main__":
# Check whether we are in profiling mode
try:
do_prof = sys.argv[1] == "prof"
if do_prof: sys.argv.pop(1)
except:
do_prof = False
if not do_prof:
sys.exit(main())
else:
import pstats, cProfile
cProfile.runctx("main()", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
|
gpl-3.0
|
DerThorsten/boring_spaghetti
|
exp/patch_W_nice_res2.py
|
1
|
4464
|
import vigra
import opengm
import numpy
import matplotlib.pyplot as plt
img = vigra.readImage('/home/tbeier/datasets/BSR/BSDS500/data/images/train/56028.jpg')
img = vigra.readImage('/home/tbeier/datasets/BSR/BSDS500/data/images/train/118035.jpg')
img = img[::1, ::1,:]
grad = vigra.filters.gaussianGradientMagnitude(vigra.colors.transform_Lab2RGB(img), 1.5).squeeze()
grad -= grad.min()
grad /= grad.max()
grad2 = grad.copy()
grad2[numpy.where(grad2<0.3)] = 0
grad2 = numpy.exp(1.5*grad2)-1.0
show = True
if show:
imgplot = plt.imshow(grad2.swapaxes(0,1))
plt.colorbar()
plt.show()
expGrad = numpy.exp(-2.1*grad)
w = 2*expGrad -1.0
w-=w.min()
if show:
imgplot = plt.imshow(w.swapaxes(0,1))
plt.colorbar()
plt.show()
gm = opengm.adder.gridPatchAffinityGm(grad2.astype(numpy.float64), 10.0*w.astype(numpy.float64), 40, 5 ,20, 0.01)
print gm
verbose = True
useQpbo = False
useCgc = False
useWs = False
with opengm.Timer("with new method"):
fusionParam = opengm.InfParam(fusionSolver = 'cgc', planar=False)
arg = None
if useQpbo:
infParam = opengm.InfParam(
numStopIt=0,
numIt=200,
generator='qpboBased',
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
# inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
inf.infer()
arg = inf.arg()
proposalParam = opengm.InfParam(
randomizer = opengm.weightRandomizer(noiseType='normalAdd',noiseParam=1.700000001, ignoreSeed=False),
stopWeight=0.0,
reduction=0.999,
setCutToZero=False
)
infParam = opengm.InfParam(
numStopIt=20,
numIt=100,
generator='randomizedHierarchicalClustering',
proposalParam=proposalParam,
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
    # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
if useWs:
print "ws"
proposalParam = opengm.InfParam(
randomizer = opengm.weightRandomizer(noiseType='normalAdd',noiseParam=1.100000001,ignoreSeed=False),
seedFraction = 0.005
)
infParam = opengm.InfParam(
numStopIt=20,
numIt=10,
generator='randomizedWatershed',
proposalParam=proposalParam,
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=1,multiline=False)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
if useQpbo:
infParam = opengm.InfParam(
numStopIt=0,
numIt=40,
generator='qpboBased',
fusionParam = fusionParam
)
inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=10)
if useCgc:
print "cgc"
infParam = opengm.InfParam(
planar=False,
startFromThreshold=False,
doCutMove = False,
doGlueCutMove = True,
maxIterations = 1
)
inf=opengm.inference.Cgc(gm, parameter=infParam)
if arg is not None:
inf.setStartingPoint(arg)
        # start inference (in this case verbose inference)
visitor=inf.verboseVisitor(printNth=10)
if verbose:
inf.infer(visitor)
else:
inf.infer()
arg = inf.arg()
print gm.evaluate(arg)
argImg = arg.reshape(img.shape[0:2])
import matplotlib,numpy
import pylab
# A random colormap for matplotlib
cmap = matplotlib.colors.ListedColormap(numpy.random.rand(argImg.max() + 1, 3))
pylab.imshow(argImg.swapaxes(0, 1), cmap=cmap)
pylab.show()
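# --- Illustrative addition, not part of the original script ---
# A tiny self-contained sketch (toy values, not the BSDS image data) of the two
# gradient transforms used above: grad2 zeroes weak gradients and applies
# exp(1.5*g) - 1, while w applies 2*exp(-2.1*g) - 1 and is shifted to be
# non-negative.
def _demo_gradient_transforms():
    g = numpy.array([0.0, 0.2, 0.5, 1.0])   # toy normalized gradient magnitudes
    g2 = g.copy()
    g2[g2 < 0.3] = 0                          # suppress weak gradients
    grad2_toy = numpy.exp(1.5 * g2) - 1.0     # 0 for weak edges, grows with gradient
    w_toy = 2.0 * numpy.exp(-2.1 * g) - 1.0
    w_toy -= w_toy.min()                      # shift so the smallest weight is 0
    return grad2_toy, w_toy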
|
mit
|
trungnt13/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
114
|
11393
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
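# --- Illustrative addition, not part of the original test module ---
# A minimal toy sketch of the equivalence checked in test_weighted_dbscan:
# giving a point an integer sample_weight of k behaves like repeating that
# point k times when deciding which samples are core points.
def _demo_sample_weight_equals_repetition():
    X_toy = np.array([[0.], [1.]])
    weights = np.array([6, 1])
    core_weighted, _ = dbscan(X_toy, eps=1.5, min_samples=6,
                              sample_weight=weights)
    core_repeated, _ = dbscan(np.repeat(X_toy, weights, axis=0),
                              eps=1.5, min_samples=6)
    # in both runs the heavily weighted point at 0 is a core sample
    assert 0 in core_weighted
    assert 0 in core_repeated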
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.13/_downloads/ftclient_rt_compute_psd.py
|
18
|
2442
|
"""
==============================================================
Compute real-time power spectrum density with FieldTrip client
==============================================================
Please refer to `ftclient_rt_average.py` for instructions on
how to get the FieldTrip connector working in MNE-Python.
This example demonstrates how to use it for continuous
computation of power spectra in real-time using the
get_data_as_epoch function.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import FieldTripClient
from mne.time_frequency import psd_welch
print(__doc__)
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
fig, ax = plt.subplots(1)
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=False, include=[], exclude=bads)
n_fft = 256 # the FFT size. Ideally a power of 2
n_samples = 2048 # time window on which to compute FFT
for ii in range(20):
epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
psd, freqs = psd_welch(epoch, fmin=2, fmax=200, n_fft=n_fft)
cmap = 'RdBu_r'
freq_mask = freqs < 150
freqs = freqs[freq_mask]
log_psd = 10 * np.log10(psd[0])
tmin = epoch.events[0][0] / raw_info['sfreq']
tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
if ii == 0:
im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
origin='lower', cmap=cmap)
ax.set_yticks(np.arange(0, len(freqs), 10))
ax.set_yticklabels(freqs[::10].round(1))
            ax.set_ylabel('Frequency (Hz)')
ax.set_xticks(np.arange(0, len(picks), 30))
ax.set_xticklabels(picks[::30])
ax.set_xlabel('MEG channel index')
im.set_clim()
else:
im.set_data(log_psd[:, freq_mask].T)
plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
% (tmin, tmax), fontsize=10)
plt.pause(0.5)
plt.close()
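# --- Illustrative addition, not part of the original example ---
# A minimal offline sketch of what a Welch PSD estimate computes, using
# scipy.signal.welch on a synthetic sinusoid instead of MNE epoch objects;
# n_fft plays the role of the segment length used above.
def _demo_welch_psd(sfreq=600., n_fft=256):
    from scipy import signal
    t = np.arange(0, 2., 1. / sfreq)
    x = np.sin(2 * np.pi * 10. * t) + 0.1 * np.random.randn(t.size)
    freqs, psd = signal.welch(x, fs=sfreq, nperseg=n_fft)
    return freqs, 10 * np.log10(psd)  # dB scale, as plotted above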
|
bsd-3-clause
|
toastedcornflakes/scikit-learn
|
sklearn/mixture/gmm.py
|
2
|
31651
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
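# --- Illustrative addition, not part of the original module ---
# A minimal sketch of sample_gaussian with a diagonal covariance: each feature
# is drawn independently with its own variance, and the samples come back with
# shape (n_features, n_samples).
def _demo_sample_gaussian_diag():
    mean = np.array([0., 5.])
    covar = np.array([1., 4.])  # per-feature variances
    X = sample_gaussian(mean, covar, covariance_type='diag',
                        n_samples=1000, random_state=0)
    return X.shape  # -> (2, 1000)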
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
        # check that at least one initialization produced a finite likelihood;
        # otherwise the fitted parameters cannot be trusted.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated in 0.18 and will be "
" removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
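# --- Illustrative addition, not part of the original module ---
# A toy check that the vectorized expression above matches the textbook
# diagonal Gaussian log-density
# -0.5 * (d*log(2*pi) + sum(log(var)) + sum((x - mu)**2 / var)).
def _demo_diag_log_density():
    X = np.array([[0., 1.]])
    means = np.array([[0., 0.]])
    covars = np.array([[1., 2.]])
    lpr = _log_multivariate_normal_density_diag(X, means, covars)
    expected = -0.5 * (2 * np.log(2 * np.pi) + np.log(2.) + 1. / 2.)
    assert np.allclose(lpr, expected)
    return lpr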
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The function distribute_covar_matrix_to_match_covariance_type "
"is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
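# Illustrative sketch (not part of the original module): the dispatch table
# above lets the covariance M step pick the update routine that matches the
# model's covariance_type.  The toy GMM state below is an assumption made
# only for this example.
def _example_covar_mstep_dispatch():
    import numpy as np

    class _ToyGMM(object):
        n_components = 2
        covariance_type = 'diag'
        means_ = np.zeros((2, 3))

    rng = np.random.RandomState(0)
    X = rng.randn(6, 3)
    responsibilities = np.ones((6, 2)) * 0.5
    weighted_X_sum = np.dot(responsibilities.T, X)
    norm = 1.0 / responsibilities.sum(axis=0)[:, np.newaxis]
    mstep = _covar_mstep_funcs[_ToyGMM.covariance_type]
    return mstep(_ToyGMM(), X, responsibilities, weighted_X_sum, norm, 1e-7)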
|
bsd-3-clause
|
nigroup/pypet
|
setup.py
|
1
|
2573
|
__author__ = 'Robert Meyer'
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
install_requires=[
'tables >= 2.3.1',
'pandas >= 0.14.1',
'numpy >= 1.6.1',
'scipy >= 0.9.0']
# check if importlib exists, if not (aka python 2.6) install it
try:
import importlib
except ImportError:
install_requires.append('importlib')
if (sys.version_info < (2, 7, 0)):
# For Python 2.6 we additionally need these packages:
    install_requires.append('unittest2')
install_requires.append('ordereddict >= 1.1')
install_requires.append('importlib >= 1.0.1')
install_requires.append('logutils >= 0.3.3')
# For versioning, Version found in pypet._version.py
verstrline = open('pypet/_version.py', "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError('Unable to find version in pypet/_version.py')
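# Illustrative check (not part of the original setup.py): the VSRE pattern
# above pulls the quoted version string out of a line such as the assumed
# sample below.
assert re.search(VSRE, "__version__ = '0.3.0'", re.M).group(1) == '0.3.0'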
setup(
name='pypet',
version=verstr,
packages=['pypet',
'pypet.brian',
'pypet.brian2',
'pypet.utils',
'pypet.tests',
'pypet.tests.unittests',
'pypet.tests.integration',
'pypet.tests.profiling',
'pypet.tests.testutils',
'pypet.tests.unittests.briantests',
'pypet.tests.integration.briantests',
'pypet.tests.unittests.brian2tests',
'pypet.tests.integration.brian2tests',
],
package_data={'pypet.tests': ['testdata/*.hdf5'], 'pypet': ['logging/*.ini']},
license='BSD',
author='Robert Meyer',
author_email='[email protected]',
description='A toolkit for numerical simulations to allow easy parameter exploration and storage of results.',
long_description=open('README.md').read(),
url='https://github.com/SmokinCaterpillar/pypet',
install_requires=install_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: BSD License',
'Topic :: Utilities']
)
|
bsd-3-clause
|
gf712/AbPyTools
|
abpytools/analysis/cdr_length.py
|
1
|
2354
|
from matplotlib import pyplot as plt
import seaborn as sns
import os
from abpytools.utils import PythonConfig
from abpytools.features.regions import ChainDomains
from matplotlib.ticker import MaxNLocator
from .analysis_helper_functions import switch_interactive_mode
class CDRLength(ChainDomains):
def __init__(self, path=None, antibody_objects=None, verbose=True, show_progressbar=True, n_threads=10):
super().__init__(path=path, antibody_objects=antibody_objects, verbose=verbose,
show_progressbar=show_progressbar, n_threads=n_threads)
def plot_cdr(self, only_cdr3=True, save=False, plot_path='./', plot_name='CDR_length',
plot_title=None, hist=True, ax=None, **kwargs):
switch_interactive_mode(save=save)
if ax is None:
if only_cdr3:
f, ax = plt.subplots(nrows=1, ncols=1)
else:
f, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
ax = ax.ravel()
if only_cdr3:
if plot_title is None:
ax.set_title('CDR3 Length', size=18)
else:
ax.set_title(plot_title, size=18)
sns.distplot(self.cdr_lengths()[:, 2], hist=hist, ax=ax, **kwargs)
ax.set_ylabel('Density', size=14)
ax.set_xlabel('CDR Length', size=14)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
else:
if plot_title is None:
plt.suptitle('CDR Length', size=20)
else:
plt.suptitle(plot_title, size=20)
for i, cdr in enumerate(['CDR 1', 'CDR 2', 'CDR 3']):
ax[i].set_title(cdr, size=16)
sns.distplot(self.cdr_lengths()[:, i], hist=hist, ax=ax[i])
if i == 0:
ax[i].set_ylabel('Density', size=16)
if i == 1:
ax[i].set_xlabel('CDR Length', size=16)
ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))
plt.tight_layout()
plt.subplots_adjust(top=0.85)
ipython_config = PythonConfig()
if ipython_config.ipython_info == 'notebook' and save is False:
plt.plot()
else:
plt.savefig(os.path.join(plot_path, plot_name), format='png')
plt.close()
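# Minimal usage sketch (not part of the original module); the FASTA file and
# the output location below are hypothetical.
if __name__ == '__main__':
    cdr_lengths = CDRLength(path='antibodies.fasta', show_progressbar=False)
    cdr_lengths.plot_cdr(only_cdr3=True, save=True,
                         plot_path='./plots', plot_name='cdr3_length')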
|
mit
|
nomadcube/scikit-learn
|
sklearn/pipeline.py
|
162
|
21103
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
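# Illustrative sketch (not part of the original module): duplicate estimator
# classes receive numbered names, while unique classes keep the plain
# lowercased class name.
def _example_name_estimators():
    from sklearn.decomposition import PCA
    from sklearn.svm import SVC
    named = _name_estimators([PCA(), PCA(), SVC()])
    return [name for name, _ in named]  # ['pca-1', 'pca-2', 'svc']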
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
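# Illustrative sketch (not part of the original module): two decompositions
# run in parallel and their outputs are stacked column-wise; the 'pca' weight
# is an arbitrary value chosen only for this example.
def _example_feature_union():
    from sklearn.decomposition import PCA, TruncatedSVD
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('svd', TruncatedSVD(n_components=2))],
                         transformer_weights={'pca': 0.5})
    X = np.random.RandomState(0).rand(10, 6)
    return union.fit_transform(X)  # shape (10, 4): 2 PCA + 2 SVD columns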
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
|
bsd-3-clause
|
gitj/rtlscan
|
process_scan.py
|
1
|
1398
|
import numpy as np
import pandas as pd
def scan_to_df(npz,num_subchannels=128,sampling_freq=1.024e6,samples_to_trim=2048):
d = np.load(npz)
num_tunings = d['freqs'].shape[0]
subchannel_offsets = sampling_freq*np.fft.fftshift(np.fft.fftfreq(num_subchannels))
window = np.hamming(num_subchannels)
frames = []
for k in range(num_tunings):
data = d['scales'][k]*(d['data_r'][k,:]+1j*d['data_i'][k,:])
data = data[samples_to_trim:]
data = data - data.mean()
ff = np.abs(np.fft.fftshift(np.fft.fft(window*data.reshape((-1,num_subchannels)),axis=1)))**2
tp = ff.sum(0)
sk = (ff.shape[0]/2.0)*(ff**2).sum(0)/(tp**2)
nspec = ff.shape[0]
tp = tp/nspec
f0 = d['freqs'][k]
freqs = subchannel_offsets + f0
timestamp = d['timestamps'][k]*np.ones(freqs.shape)
frames.append(pd.DataFrame(dict(timestamp=timestamp,freq=freqs,skurtosis=sk,total_power=tp,nspec=nspec)))
df = pd.concat(frames,ignore_index=True)
d.close()
return df
def process_files(fns):
dfs = []
for scan_num,fn in enumerate(fns):
df = scan_to_df(fn)
df['scan_num'] = scan_num
df['scan_filename'] = fn
dfs.append(df)
dfs = pd.concat(dfs,ignore_index=True)
return dfs
def load_archive(fn):
npa = np.load(fn)
df = pd.DataFrame.from_records(npa)
return df
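# Minimal usage sketch (not part of the original module); the .npz capture
# files named below are hypothetical outputs of the scanning script.
if __name__ == '__main__':
    scans = process_files(['scan_000.npz', 'scan_001.npz'])
    print(scans.groupby('scan_num')['total_power'].mean())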
|
gpl-2.0
|
ankurankan/scikit-learn
|
sklearn/tests/test_base.py
|
19
|
6858
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
"""Tests that clone creates a correct deep copy.
We create an estimator, make a copy of its original state
(which, in this case, is the current state of the estimator),
and check that the obtained copy is a correct deep copy.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
"""Tests that clone doesn't copy everything.
    We first create an estimator, give it its own attribute, and
make a copy of its original state. Then we check that the copy doesn't
have the specific attribute we manually added to the initial estimator.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
"""Check that clone raises an error on buggy estimators."""
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
"""Regression test for cloning estimators with empty arrays"""
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
"""Smoke test the repr of the base estimator."""
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
"""Smoke test the str of the base estimator"""
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
#bad_pipeline = Pipeline([("bad", NoEstimator())])
#assert_raises(AttributeError, bad_pipeline.set_params,
#bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
bsd-3-clause
|
kc-lab/dms2dfe
|
dms2dfe/lib/io_stats.py
|
2
|
4389
|
#!/usr/bin/python
# Copyright 2016, Rohan Dandage <[email protected],[email protected]>
# This program is distributed under General Public License v. 3.
"""
================================
``io_stats``
================================
"""
import pandas as pd
import numpy as np
import logging
from statsmodels.stats.weightstats import DescrStatsW,CompareMeans
from statsmodels.sandbox.stats.multicomp import multipletests
from dms2dfe.lib.io_dfs import debad
def testcomparison(df,smp1_cols,smp2_cols,test='ttest'):
if len(smp1_cols)==0 or len(smp2_cols)==0:
logging.warning("data not exist for comparison")
else:
col_stat='stat %s' % test
col_pval='pval %s' % test
df.loc[:,col_stat]=np.nan
df.loc[:,col_pval]=np.nan
for i in df.index:
X=DescrStatsW(df.loc[i,smp1_cols].as_matrix())
Y=DescrStatsW(df.loc[i,smp2_cols].as_matrix())
if test=='ttest':
df.loc[i,col_stat],df.loc[i,col_pval],tmp=CompareMeans(X,Y).ttest_ind()
if test=='ztest':
df.loc[i,col_stat],df.loc[i,col_pval]=CompareMeans(X,Y).ztest_ind()
return df
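# Illustrative sketch (not part of the original module): row-wise t-tests
# between two hypothetical replicate groups; the column names and values are
# made up for this example only.
def _example_testcomparison():
    df = pd.DataFrame({'ctrl_1': [1.0, 2.0, 0.5], 'ctrl_2': [1.2, 2.1, 0.6],
                       'sel_1': [3.0, 2.9, 2.5], 'sel_2': [2.8, 3.1, 2.6]})
    return testcomparison(df, ['ctrl_1', 'ctrl_2'], ['sel_1', 'sel_2'],
                          test='ttest')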
from scipy import stats
from dms2dfe.lib.io_dfs import denan
def get_r2(data,xcol,ycol,log=None):
data=denan(data.loc[:,[xcol,ycol]],axis='rows',condi='any')
if len(data)!=0:
if not log is None:
if log==2:
data=debad(data,axis=0,condi='any',bad=0)
data=np.log2(data)
data=debad(data,axis=0,condi='any',bad='nan')
slope, intercept, r_value, p_value, std_err = stats.linregress(data.loc[:,xcol],data.loc[:,ycol])
return r_value
else:
logging.error("one/both cols are empty")
return 0
def get_regression_metrics(y_test,y_score,
reg_type='lin',
res_rmse=True):
from scipy.stats import linregress,spearmanr
from sklearn.metrics import regression
    rmse=np.sqrt(regression.mean_squared_error(y_test,y_score))
if reg_type=='lin':
slope, intercept, r, p_value, std_err = linregress(y_test,y_score)
result="$r$=%0.2f" % (r)
elif reg_type=='rank':
r, p_value= spearmanr(y_test,y_score)
result="$\rho$=%0.2f" % (r)
if res_rmse:
result="%s\nRMSE=%0.2f" % (result,rmse)
return result,r,rmse
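# Illustrative sketch (not part of the original module): summarising the
# agreement between hypothetical measured and predicted scores with a rank
# correlation and an error term.
def _example_get_regression_metrics():
    y_test = np.array([0.10, 0.40, 0.35, 0.80])
    y_score = np.array([0.15, 0.38, 0.30, 0.72])
    result, r, rmse = get_regression_metrics(y_test, y_score, reg_type='rank')
    return result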
from dms2dfe.lib.io_ml import denanrows
from scipy.stats import wilcoxon
from numpy import asarray,compress
def get_wilcoxon_direction(data,col_x,col_y):
data=denanrows(data.loc[:,[col_x,col_y]])
x=data.loc[:,col_x]
y=data.loc[:,col_y]
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
logging.info("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if r_plus>r_minus:
return 'negative'
if r_minus>r_plus:
return 'positive'
def get_wilcoxon(data,col_ctrl,col_test,side='both',denan=True):
if denan:
data=denanrows(data.loc[:,[col_ctrl,col_test]])
ranksum,pval=wilcoxon(data.loc[:,col_ctrl],data.loc[:,col_test],
# zero_method = "wilcox",
)
# print "ranksum=%d; pval=%d" % (ranksum,pval)
if side=='both':
return pval
else:
pval=pval/2
side_detected=get_wilcoxon_direction(data,col_ctrl,col_test)
if side=='one':
return pval,side_detected
# print side_detected
else:
if side==side_detected:
return pval
elif side!=side_detected:
return 1-pval
def pval2stars(pval,ns=True,numeric=False):
if not numeric:
if pval < 0.0001:
return "****"
elif (pval < 0.001):
return "***"
elif (pval < 0.01):
return "**"
elif (pval < 0.05):
return "*"
else:
if ns:
return "ns"
else:
return "p = %.2g" % pval
else:
return "p = %.2g" % pval
|
gpl-3.0
|
keptenkurk/BS440
|
plugins/BS440webapp/BS440plot.py
|
1
|
17499
|
#!/usr/bin/env python
# coding: utf-8
#-----------------------------------------------------------------------------------------
# BS440 plugin BS440plot.py
# About:
# Generate charts from CSV file to HTML file whenever BS440flask.py requires it
#
# Requirements:
# BS440 plugin BS440csv.py
#
#
# Dependencies (install with 'sudo -H pip install packagename'):
# pandas math plotly scipy
#
__author__ = 'DjZU'
__email__ = "[email protected]"
__license__ = "EUPL-1.1"
__version__ = "1.0.1"
__status__ = "Production"
#
#------------------------------------------------------------------------------------------
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as plotly
import argparse
from datetime import datetime, timedelta
from ConfigParser import SafeConfigParser
from math import radians, cos, sin
from scipy import stats
import logging
import os
import sys
#-----------------------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------------------
def update_plotlyjs_for_offline():
import urllib2
cdn_url = 'https://cdn.plot.ly/plotly-latest.min.js'
response = urllib2.urlopen(cdn_url)
html = response.read()
f = open('./static/plotly.min.js', 'w')
f.write(html)
f.close()
log.info('Done updating ./static/plotly.min.js')
# This code is shipped with version plotly.js v1.20.5 retrieved from /usr/local/lib/python2.7/dist-packages/plotly/package_data/plotly.min.js
# Uncomment to update ./static/plotly.min.js. Beware it could screw the display.
#update_plotlyjs_for_offline()
def convert_to_datetime(timestamp):
if (timeWindow > 365):
return datetime.fromtimestamp(int(timestamp)).strftime('%A%n%b %-d %Y%n%-H:%M')
else:
return datetime.fromtimestamp(int(timestamp)).strftime('%A%n%b %-d%n%-H:%M')
def prepare_weight_text(weight):
return str(weight) + " kg"
def prepare_fat_text(fat):
return "Fat:\t\t" + str(fat) + " %\n"
def prepare_muscle_text(muscle):
return "Muscle:\t" + str(muscle) + " %\n"
def prepare_bone_text(bone):
return "Bone:\t" + str(bone) + " kg\n"
def prepare_water_text(water):
return "Water:\t" + str(water) + " %\n"
def prepare_bmi_text(bmi):
return "BMI:\t\t" + str(bmi) + "\n"
def prepare_kcal_text(kcal):
return str(kcal) + " kcal "
def rotatePoint(centerPoint,point,angle,output):
"""Rotates a point around another centerPoint. Angle is in degrees.
Rotation is counter-clockwise"""
angle = 360 - angle
""" Rotation is now clockwise """
angle = radians(angle)
temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
# 300/(260-20)=1.25
temp_point = ( temp_point[0]*cos(angle)-(temp_point[1]*sin(angle)) , ((temp_point[0]*sin(angle))+temp_point[1]*cos(angle))*1.25)
temp_point = temp_point[0]+centerPoint[0] , temp_point[1]+centerPoint[1]
rotatedPoint= str(temp_point[0]) + " " + str(temp_point[1])
if output=='dialPath':
return rotatedPoint
elif output=='x':
return str(temp_point[0])
elif output=='y':
return str(temp_point[1])
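# Illustrative sketch (not part of the original script): rotating the point
# (0.4, 0.5) by 90 degrees around the chart centre (0.5, 0.5) lands near
# x = 0.5 and, after the 1.25 vertical stretch, y = 0.625.
def _example_rotate_point():
    x = float(rotatePoint((0.5, 0.5), (0.4, 0.5), 90, 'x'))
    y = float(rotatePoint((0.5, 0.5), (0.4, 0.5), 90, 'y'))
    assert abs(x - 0.5) < 1e-9 and abs(y - 0.625) < 1e-9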
def gaugeDiv(baseLabels, meterLabels, colors, value, suffix):
meterValues = []
meterValues.append(0)
meterSum = 0
# Calculate steps. Then first value is the sum of all the others.
for i in range(1, len(baseLabels)-1):
meterValues.append(float(baseLabels[i+1]) - float(baseLabels[i]))
meterSum += meterValues[i]
meterValues[0] = meterSum
# Dial path. Apply angle from full left position.
rangeValue = float(meterValues[0])
minValue=float(baseLabels[1])
chartCenter=0.5
dialTip=chartCenter-0.12
dialAngle=(value-minValue)*180/rangeValue
dialPath = 'M ' + rotatePoint((chartCenter,0.5),(chartCenter,0.485),dialAngle, 'dialPath') + ' L ' + rotatePoint((chartCenter,0.5),(dialTip,0.5),dialAngle, 'dialPath') + ' L ' + rotatePoint((chartCenter,0.5),(chartCenter,0.515),dialAngle, 'dialPath') + ' Z'
infoText=(str(value) + str(suffix))
# Gauge
meterChart = go.Pie(
values=meterValues, labels=meterLabels,
marker=dict(colors=colors,
line=dict(width=0) # Switch line width to 0 in production
),
name="Gauge", hole=.3, direction="clockwise", rotation=90,
showlegend=False, textinfo="label", textposition="inside", hoverinfo="none",
sort=False
)
# Layout
layout = go.Layout(
xaxis=dict(showticklabels=False, autotick=False, showgrid=False, zeroline=False,),
yaxis=dict(showticklabels=False, autotick=False, showgrid=False, zeroline=False,),
shapes=[dict(
type='path', path=dialPath, fillcolor='rgba(44, 160, 101, 1)',
line=dict(width=0.5), xref='paper', yref='paper'),
],
annotations=[
dict(xref='paper', yref='paper', x=(chartCenter-0.015), y=0.2, text=infoText, font=dict(size='20', color='#ffffff'), showarrow=False),
],
height=260, width=300, margin=dict(l=0, r=0, t=20, b=0, autoexpand=False), plot_bgcolor="rgba(0,0,0,0)", paper_bgcolor="rgba(0,0,0,0)"
)
# Write static values as annotations
for value in baseLabels:
        if value != '-':
annotationDict=dict(
xref='paper', yref='paper', xanchor='center', yanchor='middle',
x=rotatePoint((chartCenter,0.5),((chartCenter-0.45),0.5), ((float(value)-minValue)*180/rangeValue), 'x'),
y=rotatePoint((chartCenter,0.5),((chartCenter-0.45),0.5), ((float(value)-minValue)*180/rangeValue), 'y'),
font=dict(size='12', color='#ffffff'), showarrow=False, text=value,
)
layout['annotations'].append(annotationDict)
# Build HTML div
div = plotly.plot(dict(data=[meterChart], layout=layout), include_plotlyjs=False, show_link=False, output_type='div')
return div
def barDiv(slope, value, suffix, bar_suffix, bar_format, color, layoutRange):
infoText=(str(value) + str(suffix))
tendancy = (slope*timedelta(days=7).total_seconds()) # ToDO: round instead
if tendancy > 0:
tendancyText="+"
tendancyOrigin=0.825
else:
tendancyText=""
tendancyOrigin=0.675
tendancyText+=str(bar_format % tendancy)+' '+bar_suffix
tendancyPosition=tendancyOrigin+((1.2*tendancy)/layoutRange[1]/4)
# Bar
barChart = go.Bar(
x=['tendancy',], y=[float(tendancy),],
name="Bar", showlegend=False, hoverinfo="none", marker=dict(color=color),
)
# Layout
layout = go.Layout(
xaxis=dict(showticklabels=False, autotick=False, showgrid=False, zeroline=True, fixedrange=True, domain=[0, 1], ),
yaxis=dict(showticklabels=False, autotick=False, showgrid=False, zeroline=False, fixedrange=True, domain=[0.5, 1], range=layoutRange, ),
annotations=[
dict(xref='paper', yref='paper', x=0.5, y=0, text=infoText, font=dict(size='20', color='#ffffff'), xanchor='center', yanchor='middle', showarrow=False),
dict(xref='paper', yref='paper', x=0.5, y=tendancyPosition, text=tendancyText, font=dict(size='14', color='#ffffff'), xanchor='center', yanchor='middle', showarrow=False),
],
height=260, width=120, margin=dict(l=0, r=0, t=40, b=40, autoexpand=False), plot_bgcolor="rgba(0,0,0,0)", paper_bgcolor="rgba(0,0,0,0)"
)
# Build HTML div
div = plotly.plot(dict(data=[barChart], layout=layout), include_plotlyjs=False, show_link=False, output_type='div')
return div
#-----------------------------------------------------------------------------------------
# Get arguments
parser = argparse.ArgumentParser(description='Plot body data from smart scale.')
parser.add_argument('-p','--person', help='Person ID',required=True)
parser.add_argument('-w','--window',help='Time window in days.', required=True)
args = parser.parse_args()
# Parameters
personID = int(args.person)
timeWindow = int(args.window)
dirname = os.path.dirname(__file__)
csvFile = '../BS440csv/' + str(personID) + '.csv'
csvPath = os.path.join(dirname, csvFile)
# BS440webapp config
config = SafeConfigParser()
config.read('BS440webapp.ini')
personsection = 'Person' + str(personID)
# Set up Logging
numeric_level = getattr(logging,
config.get('Program', 'loglevel').upper(),
None)
if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % config.get('Program', 'loglevel'))
logging.basicConfig(level=numeric_level,
format='%(asctime)s %(levelname)-8s %(funcName)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='BS440plot.log',
filemode='a')
log = logging.getLogger(__name__)
# Grab person details
if config.has_section(personsection):
person = config.get(personsection, 'username')
gender = config.get(personsection, 'gender')
goal = config.get(personsection, 'goal')
else:
log.error('Unable to plot: No details found in ini file for person %d. Exiting.' % personID)
sys.exit(127)
#-----------------------------------------------------------------------------------------
# Import data from csv
df = pd.read_csv(csvPath, header=None, names=['timestamp','weight','fat','muscle','bone','water','kcal','bmi'])
# Retrieve most recent timestamp
last_timestamp=df['timestamp'].iat[-1]
# Calcultate the timestamp equal to the last timestamp minus the given time window
min_timestamp = (datetime.fromtimestamp(int(last_timestamp)) - timedelta(days=timeWindow)).strftime('%s')
# Tail the data frame
df = df[(df.timestamp > int(min_timestamp))]
# Retrieve oldest timestamp
first_timestamp = df.loc[df.index[0], 'timestamp']
# Create datetime from timestamp
df['datetime'] = df['timestamp'].apply(convert_to_datetime)
# Retrieve most recent values
last_weight=df['weight'].iat[-1]
last_fat=df['fat'].iat[-1]
last_muscle=df['muscle'].iat[-1]
last_bone=df['bone'].iat[-1]
last_water=df['water'].iat[-1]
last_kcal=df['kcal'].iat[-1]
last_bmi=df['bmi'].iat[-1]
last_datetime=df['datetime'].iat[-1]
# Prepare text from weight and suffix kg
df['weightText'] = df['weight'].apply(prepare_weight_text)
# Prepare hover data
df['fatText'] = df['fat'].apply(prepare_fat_text)
df['muscleText'] = df['muscle'].apply(prepare_muscle_text)
df['boneText'] = df['bone'].apply(prepare_bone_text)
df['waterText'] = df['water'].apply(prepare_water_text)
df['kcalText'] = df['kcal'].apply(prepare_kcal_text)
df['bmiText'] = df['bmi'].apply(prepare_bmi_text)
df['dataText'] = df['fatText'] + df['muscleText'] + df['waterText'] + df['boneText'] + df['bmiText'] + df['kcalText']
#-----------------------------------------------------------------------------------------
# Configure main plot
#-----------------------------------------------------------------------------------------
# Weight tendancy
weightSlope, weightIntercept, weight_r_value, weight_p_value, weight_std_err = stats.linregress(df['timestamp'],df['weight'])
weightTendancyLine = weightSlope*df['timestamp']+weightIntercept
if (float(goal) > last_weight) and (weightSlope > 0):
weightBarColor=['rgb(51,255,102)',]
elif (float(goal) < last_weight) and (weightSlope < 0):
weightBarColor=['rgb(51,255,102)',]
else:
weightBarColor=['rgb(255,102,0)',]
weightDiv = barDiv(weightSlope*1000, last_weight, 'kg<br>Weight', '<br>g/week', '%.0f', weightBarColor, [-2000,2000])
tendancyTrace = go.Scatter(
x=df['timestamp'], y=weightTendancyLine, # Data
mode='lines', name='Tendancy', hoverinfo='none', visible=False # Additional options
)
# Traces
weightTrace = go.Scatter(
x=df['timestamp'], y=df['weight'], # Data
mode='markers+lines+text', text=df['weightText'], textposition='top center', textfont=dict(size='16', color='#1f77b4'), name='weight',
line=dict(shape='spline', smoothing='0.5'), hoverinfo='none', # Additional options
)
dataTrace = go.Scatter(
x=df['timestamp'], y=df['weight'], # Data
mode='markers', name='', text=df['dataText'], hoverinfo='text', # Additional options
)
goalTrace = go.Scatter(
x=[first_timestamp, last_timestamp], y=[goal, goal], # Data
mode='lines', name='goal', line=dict(shape='spline', smoothing='0.5'), hoverinfo='none', # Additional options
)
traceData=[weightTrace, dataTrace, goalTrace, tendancyTrace]
# If tick labels (date and time on x axis) are too long, increase bottom margin and height to give room
layoutBottomRoom=0
layoutHeight=450+layoutBottomRoom
layoutBottomMargin=80+layoutBottomRoom
weightLayout = go.Layout(
title = person + ' Body Weight (kg)', titlefont=dict(size='20', color='#ffffff'),
showlegend=False, height=layoutHeight, margin=dict(l=40, r=40, t=60, b=layoutBottomMargin),
xaxis=dict(showticklabels=True, tickvals=df['timestamp'], ticktext=df['datetime'], tickfont=dict(size='14', color='#ffffff'), ),
yaxis=dict(mirror='ticks', tickfont=dict(size='14', color='#ffffff')), paper_bgcolor="rgba(0,0,0,0)"
)
weightFig = dict(data=traceData, layout=weightLayout)
# Plot weight trace
plotDiv = plotly.plot(weightFig, include_plotlyjs=False, show_link=False, output_type='div')
#-----------------------------------------------------------------------------------------
# Configure gauges
#-----------------------------------------------------------------------------------------
# BMI gauge
bmiDiv = gaugeDiv(["-", "14", "18.5", "25", "30", "40"],
[" ", "Underweight", "Normal", "Overweight", "Obese"],
['rgba(0,0,0,0)','rgb(204,204,204)','rgb(51,255,102)','rgb(255,255,51)','rgb(255,102,0)'],
last_bmi, '<br>BMI')
# Fat gauge
if gender == 'male':
fatBaseLabels = ["-", "2", "6", "13", "17", "22", "30", "40"]
elif gender == 'female':
fatBaseLabels = ["-", "10", "14", "21", "25", "31", "40", "50"]
fatDiv = gaugeDiv(fatBaseLabels,
[" ", "Essential fat", "Athlete", "Fitness", "Average", "Overweight", "Obese"],
['rgba(0,0,0,0)','rgb(204,204,204)','rgb(102,255,153)','rgb(51,255,102)','rgb(255,255,51)','rgb(255,153,51)','rgb(255,102,0)'],
last_fat, '%<br>Fat')
# Water gauge
if gender == 'male':
if last_fat >= 4 and last_fat < 15:
waterBaseLabels = ["-", "30", "63", "70", "80"]
if last_fat >= 15 and last_fat < 22:
waterBaseLabels = ["-", "30", "57", "63", "80"]
if last_fat >= 22 and last_fat < 25:
waterBaseLabels = ["-", "30", "55", "57", "80"]
if last_fat >= 25:
waterBaseLabels = ["-", "30", "37", "55", "80"]
elif gender == 'female':
if last_fat >= 4 and last_fat < 21:
waterBaseLabels = ["-", "30", "58", "70", "80"]
if last_fat >= 21 and last_fat < 30:
waterBaseLabels = ["-", "30", "52", "58", "80"]
if last_fat >= 30 and last_fat < 33:
waterBaseLabels = ["-", "30", "49", "52", "80"]
if last_fat >= 33:
waterBaseLabels = ["-", "30", "37", "49", "80"]
waterDiv = gaugeDiv(waterBaseLabels,
[" ", "Low", "Optimal", "High"],
['rgba(0,0,0,0)','rgb(255,255,51)','rgb(51,255,102)','rgb(255,255,51)'],
last_water, '%<br>Water')
# Fat tendancy
fatSlope, fatIntercept, fat_r_value, fat_value, fat_std_err = stats.linregress(df['timestamp'],df['fat'])
fatTendancyLine = fatSlope*df['timestamp']+fatIntercept
if fatSlope < 0:
fatBarColor=['rgb(51,255,102)',]
else:
fatBarColor=['rgb(255,102,0)',]
fatBarDiv = barDiv(fatSlope, last_fat, '%<br>Fat', '<br>%/week', '%.2f', fatBarColor, [-0.6,0.6])
# Muscle tendancy
muscleSlope, muscleIntercept, muscle_r_value, muscle_value, muscle_std_err = stats.linregress(df['timestamp'],df['muscle'])
muscleTendancyLine = muscleSlope*df['timestamp']+muscleIntercept
if muscleSlope > 0:
muscleBarColor=['rgb(51,255,102)',]
else:
muscleBarColor=['rgb(255,102,0)',]
muscleDiv = barDiv(muscleSlope, last_muscle, '%<br>Muscle', '<br>%/week', '%.2f', muscleBarColor, [-0.6,0.6])
# kCal tendancy
kcalSlope, kcalIntercept, kcal_r_value, kcal_value, kcal_std_err = stats.linregress(df['timestamp'],df['kcal'])
kcalTendancyLine = kcalSlope*df['timestamp']+kcalIntercept
kcalDiv = barDiv(kcalSlope, last_kcal, 'kcal<br>Needs', '<br>kcal/week', '%.0f', weightBarColor, [-100,100])
# Bone tendancy
boneSlope, boneIntercept, bone_r_value, bone_value, bone_std_err = stats.linregress(df['timestamp'],df['bone'])
boneTendancyLine = boneSlope*df['timestamp']+boneIntercept
if boneSlope > 0:
boneBarColor=['rgb(51,255,102)',]
else:
boneBarColor=['rgb(255,102,0)',]
boneDiv = barDiv(boneSlope*1000, last_bone, 'kg<br>Bone', '<br>g/week', '%.0f', boneBarColor, [-100,100])
#-----------------------------------------------------------------------------------------
# Build HTML
#-----------------------------------------------------------------------------------------
# Prepare HTML
plotlyHTML = """
<div id="trace">
"""
plotlyHTML += plotDiv
plotlyHTML += """
</div><br/>"""
plotlyHTML += """
<div id="gaugesandbars">
<div id="gauges">
<div class="gauge">
"""
plotlyHTML += bmiDiv + """
</div>"""
plotlyHTML += """
<div class="gauge">
"""
plotlyHTML += fatDiv + """
</div>"""
plotlyHTML += """
<div class="gauge">
"""
plotlyHTML += waterDiv + """
</div>"""
plotlyHTML += """
</div>
<div id="bars">
<div class="bar">
"""
plotlyHTML += weightDiv + """
</div>"""
plotlyHTML += """
<div class="bar">
"""
plotlyHTML += fatBarDiv + """
</div>"""
plotlyHTML += """
<div class="bar">
"""
plotlyHTML += muscleDiv + """
</div>"""
plotlyHTML += """
<div class="bar">
"""
plotlyHTML += kcalDiv + """
</div>"""
plotlyHTML += """
<div class="bar">
"""
plotlyHTML += boneDiv + """
</div>
</div>
</div>"""
# Generate template to be used by Flask
fileName= './templates/plot-' + str(personID) + '-' + str(timeWindow) + '.html'
try:
f = open(fileName,'w')
f.write(plotlyHTML)
f.close()
log.info('Plot file %s generated successfully for user %s.' % (fileName, person) )
except:
log.error('Failed to generate plot file %s for user %s.' % (fileName, person) )
sys.exit(126)
|
mit
|
selective-inference/selective-inference
|
selectinf/randomized/tests/sandbox/test_cv.py
|
3
|
6370
|
import numpy as np
import pandas as pd
from statsmodels.sandbox.stats.multicomp import multipletests
import regreg.api as rr
from ...api import (randomization,
glm_group_lasso,
multiple_queries)
from ...tests.instance import (gaussian_instance,
logistic_instance)
from ...tests.flags import SMALL_SAMPLES, SET_SEED
from ...tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue)
from ..query import naive_confidence_intervals, naive_pvalues
from ..M_estimator import restricted_Mest
from ..cv_view import CV_view
from ..glm import (glm_nonparametric_bootstrap,
pairs_bootstrap_glm)
if SMALL_SAMPLES:
nboot = 10
else:
nboot = -1
@set_seed_iftrue(SET_SEED)
@set_sampling_params_iftrue(SMALL_SAMPLES, burnin=10, ndraw=10)
@wait_for_return_value()
def test_cv(n=100, p=50, s=5, signal=7.5, K=5, rho=0.,
randomizer = 'gaussian',
randomizer_scale = 1.,
scale1 = 0.1,
scale2 = 0.2,
lam_frac = 1.,
glmnet = True,
loss = 'gaussian',
bootstrap = False,
condition_on_CVR = True,
marginalize_subgrad = True,
ndraw = 10000,
burnin = 2000,
nboot = nboot):
print(n,p,s, condition_on_CVR, scale1, scale2)
if randomizer == 'laplace':
randomizer = randomization.laplace((p,), scale=randomizer_scale)
elif randomizer == 'gaussian':
randomizer = randomization.isotropic_gaussian((p,),randomizer_scale)
elif randomizer == 'logistic':
randomizer = randomization.logistic((p,), scale=randomizer_scale)
if loss == "gaussian":
X, y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1)
glm_loss = rr.glm.gaussian(X, y)
elif loss == "logistic":
X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
glm_loss = rr.glm.logistic(X, y)
epsilon = 1./np.sqrt(n)
# view 1
cv = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
if glmnet:
try:
cv.solve(glmnet=glmnet)
except ImportError:
cv.solve(glmnet=False)
else:
cv.solve(glmnet=False)
# for the test make sure we also run the python code
cv_py = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
cv_py.solve(glmnet=False)
lam = cv.lam_CVR
print("lam", lam)
if condition_on_CVR:
cv.condition_on_opt_state()
lam = cv.one_SD_rule(direction="up")
print("new lam", lam)
# non-randomized Lasso, just looking how many vars it selects
problem = rr.simple_problem(glm_loss, rr.l1norm(p, lagrange=lam))
beta_hat = problem.solve()
active_hat = beta_hat !=0
print("non-randomized lasso ", active_hat.sum())
# view 2
W = lam_frac * np.ones(p) * lam
penalty = rr.group_lasso(np.arange(p),
weights=dict(zip(np.arange(p), W)), lagrange=1.)
M_est = glm_group_lasso(glm_loss, epsilon, penalty, randomizer)
if nboot > 0:
cv.nboot = M_est.nboot = nboot
mv = multiple_queries([cv, M_est])
mv.solve()
active_union = M_est._overall
nactive = np.sum(active_union)
print("nactive", nactive)
if nactive==0:
return None
nonzero = np.where(beta)[0]
if set(nonzero).issubset(np.nonzero(active_union)[0]):
active_set = np.nonzero(active_union)[0]
true_vec = beta[active_union]
if marginalize_subgrad == True:
M_est.decompose_subgradient(conditioning_groups=np.zeros(p, bool),
marginalizing_groups=np.ones(p, bool))
selected_features = np.zeros(p, np.bool)
selected_features[active_set] = True
unpenalized_mle = restricted_Mest(M_est.loss, selected_features)
form_covariances = glm_nonparametric_bootstrap(n, n)
target_info, target_observed = pairs_bootstrap_glm(M_est.loss, selected_features, inactive=None)
cov_info = M_est.setup_sampler()
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info],
nsample=M_est.nboot)
opt_sample = M_est.sampler.sample(ndraw,
burnin)
pvalues = M_est.sampler.coefficient_pvalues(unpenalized_mle,
target_cov,
score_cov,
parameter=np.zeros(selected_features.sum()),
sample=opt_sample)
intervals = M_est.sampler.confidence_intervals(unpenalized_mle, target_cov, score_cov, sample=opt_sample)
L, U = intervals.T
sel_covered = np.zeros(nactive, np.bool)
sel_length = np.zeros(nactive)
LU_naive = naive_confidence_intervals(np.diag(target_cov), target_observed)
naive_covered = np.zeros(nactive, np.bool)
naive_length = np.zeros(nactive)
naive_pvals = naive_pvalues(np.diag(target_cov), target_observed, true_vec)
active_var = np.zeros(nactive, np.bool)
for j in range(nactive):
if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
sel_covered[j] = 1
if (LU_naive[j, 0] <= true_vec[j]) and (LU_naive[j, 1] >= true_vec[j]):
naive_covered[j] = 1
sel_length[j] = U[j]-L[j]
naive_length[j] = LU_naive[j,1]-LU_naive[j,0]
active_var[j] = active_set[j] in nonzero
q = 0.2
        BH_decisions = multipletests(pvalues, alpha=q, method="fdr_bh")[0]
        return sel_covered, sel_length, naive_pvals, naive_covered, naive_length, active_var, BH_decisions, active_var
|
bsd-3-clause
|
rew4332/tensorflow
|
tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py
|
15
|
9742
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.initialize_local_variables().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
score_range = score_range or [0, 1.]
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.initialize_local_variables().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0.5, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
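# Worked example (illustrative, not part of the original derivation): for
# desired_auc = 0.75 the formula gives x = 2 * 0.75 - 1 = 0.5, and plugging
# back in, P[T > F] = 1 * 0.5 + 0.5 * (1 - 0.5) = 0.75, so the inversion checks out.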
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
cl4rke/scikit-learn
|
benchmarks/bench_sgd_regression.py
|
283
|
5569
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
bsd-3-clause
|
dataworkshop/prerequisite
|
run.py
|
1
|
3916
|
from clint.textui import colored
from distutils.version import StrictVersion
import importlib
import argparse
packages = {
"IPython": "4.0",
"ipywidgets": "4.1.1",
"pandas": "0.18",
"numpy": "1.15",
"matplotlib": "2.0",
"seaborn": "0.7",
"sklearn": "0.18",
"xgboost": "0.7",
"ml_metrics": "0.1.4",
"hyperopt": "0.1",
"hyperas": "0.4",
"tensorflow": "1.12",
"gensim": "3.4",
"sqlalchemy": "1.2",
"tqdm": "4.28",
"xgbfir": "0.3",
"graphviz": "0.8",
"livelossplot": "0.3",
"rfpimp": "1.3",
"eli5": "0.8",
"skimage": "0.13",
"scikitplot": "0.3",
"deepreplay": "0.1.1a2",
"albumentations": "0.2",
"clint": "0.5",
"pandas_profiling": "1.4",
"mpld3": "0.3",
"modin": "0.6",
"qgrid": "1.1",
"imblearn": "0.5",
}
additional_visual_packages = {
"gmplot": "1.1.1",
"geoplotlib": "0.3.2",
"folium": "0.2.1",
"vincent": "0.4.4",
"geopandas": "0.2.1",
"mpl_toolkits.basemap": "1.0.7"
}
def verify_packages(packages):
missing_packages = []
upgrade_packages = []
for package_name, package_version in packages.items():
current_version = get_version_package(package_name)
if current_version is False:
print(colored.red("{0} - missing".format(package_name)))
missing_packages.append(package_name)
elif current_version is None:
handle_package_without_version(current_version, package_version, package_name)
else:
if version_is_good(current_version, package_version):
print(colored.green("{0}-{1} - OK".format(package_name, current_version)))
else:
print(colored.yellow("{0}-{1} should be upgraded to {0}-{2}".format(package_name, current_version, package_version)))
upgrade_packages.append(package_name)
return missing_packages, upgrade_packages
def handle_package_without_version(current_version, package_version, package_name):
if current_version != package_version:
print(colored.yellow("{0} exists, but has no attribute version. Expected version {1}".format(package_name, package_version)))
else:
print(colored.green("{0} - OK".format(package_name)))
def get_version_package(package_name):
try:
return importlib.import_module(package_name).__version__
except ImportError:
return False
except AttributeError:
return None
def version_is_good(actual_version, expected_version):
return StrictVersion(actual_version) >= StrictVersion(expected_version)
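# Illustrative doctest-style check (the version strings here are hypothetical,
# not taken from the package table above):
# >>> version_is_good("0.20.1", "0.18")
# True
# StrictVersion compares release components numerically, so 0.20.1 >= 0.18.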
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prerequisite')
parser.add_argument('--extra_visual', '-ev', action='store_true')
args = parser.parse_args()
if args.extra_visual:
packages.update(additional_visual_packages)
missing_packages, upgrade_packages = verify_packages(packages)
if not missing_packages and not upgrade_packages:
print("")
print(colored.green("=" * 50))
print(colored.green("All right, you are ready to go on DataWorkshop!"))
if missing_packages:
print("")
print(colored.red("=" * 50))
print(colored.red("REQUIRED"))
print(colored.red("Please install those packages before DataWorkshop: " + ", ".join(missing_packages)))
print(colored.blue("pip install {0}".format( " ".join(missing_packages) )))
if 'xgboost' in missing_packages:
print(colored.red("More info how to install xgboost: ") + colored.blue("http://xgboost.readthedocs.org/en/latest/build.html"))
if upgrade_packages:
print("")
print(colored.yellow("=" * 50))
print(colored.yellow("RECOMMENDATION (without upgrade some needed features could be missing)"))
print(colored.blue("pip install --upgrade {0}".format( " ".join(upgrade_packages) )))
|
mit
|
cmoutard/mne-python
|
examples/inverse/plot_dics_beamformer.py
|
18
|
2905
|
"""
======================================
Compute DICS beamformer on evoked data
======================================
Compute a Dynamic Imaging of Coherent Sources (DICS) beamformer from single
trial activity in a time-frequency window to estimate source time courses based
on evoked data.
The original reference for DICS is:
Gross et al. Dynamic imaging of coherent sources: Studying neural interactions
in the human brain. PNAS (2001) vol. 98 (2) pp. 694-699
"""
# Author: Roman Goj <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
from mne.time_frequency import compute_epochs_csd
from mne.beamformer import dics
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Read raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
# Read epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12))
evoked = epochs.average()
# Read forward operator
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Computing the data and noise cross-spectral density matrices
# The time-frequency window was chosen on the basis of spectrograms from
# example time_frequency/plot_time_frequency.py
data_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=0.04, tmax=0.15,
fmin=6, fmax=10)
noise_csd = compute_epochs_csd(epochs, mode='multitaper', tmin=-0.11, tmax=0.0,
fmin=6, fmax=10)
evoked = epochs.average()
# Compute DICS spatial filter and estimate source time courses on evoked data
stc = dics(evoked, forward, noise_csd, data_csd)
plt.figure()
ts_show = -30  # show the 30 largest responses
plt.plot(1e3 * stc.times,
stc.data[np.argsort(stc.data.max(axis=1))[ts_show:]].T)
plt.xlabel('Time (ms)')
plt.ylabel('DICS value')
plt.title('DICS time course of the 30 largest sources.')
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.set_data_time_index(180)
brain.show_view('lateral')
# Uncomment to save image
# brain.save_image('DICS_map.png')
|
bsd-3-clause
|
akionakamura/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
2903
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/matplotlib/cbook.py
|
10
|
70573
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
from itertools import repeat
import datetime
import errno
from functools import reduce
import glob
import gzip
import io
import locale
import os
import re
import sys
import threading
import time
import traceback
import types
import warnings
from weakref import ref, WeakKeyDictionary
import numpy as np
import numpy.ma as ma
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
http://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_message(since, message='', name='',
alternative='', pending=False,
obj_type='attribute'):
if not message:
altmessage = ''
if pending:
message = (
'The %(func)s %(obj_type)s will be deprecated in a '
'future version.')
else:
message = (
'The %(func)s %(obj_type)s was deprecated in version '
'%(since)s.')
if alternative:
altmessage = ' Use %s instead.' % alternative
message = ((message % {
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type,
'since': since}) +
altmessage)
return message
def warn_deprecated(
since, message='', name='', alternative='', pending=False,
obj_type='attribute'):
"""
Used to display deprecation warning in a standard way.
Parameters
------------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
warnings.warn(message, mplDeprecation, stacklevel=1)
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type='function'):
"""
Decorator to mark a function as deprecated.
Parameters
------------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(func)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function; if not provided the name
is automatically determined from the passed in function,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user about
this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(func, message=message, name=name, alternative=alternative,
pending=pending):
import functools
import textwrap
if isinstance(func, classmethod):
try:
func = func.__func__
except AttributeError:
# classmethods in Python2.6 and below lack the __func__
# attribute so we need to hack around to get it
method = func.__get__(None, object)
if hasattr(method, '__func__'):
func = method.__func__
elif hasattr(method, 'im_func'):
func = method.im_func
else:
# Nothing we can do really... just return the original
# classmethod
return func
is_classmethod = True
else:
is_classmethod = False
if not name:
name = func.__name__
message = _generate_deprecation_message(
since, message, name, alternative, pending, obj_type)
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(message, mplDeprecation, stacklevel=2)
return func(*args, **kwargs)
old_doc = deprecated_func.__doc__
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
message = message.strip()
new_doc = (('\n.. deprecated:: %(since)s'
'\n %(message)s\n\n' %
{'since': since, 'message': message}) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
deprecated_func.__doc__ = new_doc
if is_classmethod:
deprecated_func = classmethod(deprecated_func)
return deprecated_func
return deprecate
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
def unicode_safe(s):
import matplotlib
if isinstance(s, bytes):
try:
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return six.text_type(s)
else:
return six.text_type(s, preferredencoding)
return s
class converter(object):
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s == self.missing:
return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s == self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s):
return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
def __call__(self, s):
if self.is_missing(s):
return self.missingval
return int(s)
class _BoundMethodProxy(object):
'''
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
'''
def __init__(self, cb):
try:
try:
self.inst = ref(cb.im_self)
except TypeError:
self.inst = None
if six.PY3:
self.func = cb.__func__
self.klass = cb.__self__.__class__
else:
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
'''
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
'''
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
'''
Compare the held function and instance with that held by
another proxy.
'''
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
'''
Inverse of __eq__.
'''
return not self.__eq__(other)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module can not create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/articles/python-weak-references/>`_.
"""
def __init__(self, *args):
if len(args):
warn_deprecated(
'1.3',
message="CallbackRegistry no longer requires a list of "
"callback types. Ignoring arguments. *args will "
"be removed in 1.5")
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._func_cid_map.setdefault(s, WeakKeyDictionary())
if func in self._func_cid_map[s]:
return self._func_cid_map[s][func]
self._cid += 1
cid = self._cid
self._func_cid_map[s][func] = cid
self.callbacks.setdefault(s, dict())
proxy = _BoundMethodProxy(func)
self.callbacks[s][cid] = proxy
return cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in list(six.iteritems(self.callbacks)):
try:
del callbackd[cid]
except KeyError:
continue
else:
for category, functions in list(
six.iteritems(self._func_cid_map)):
for function, value in list(six.iteritems(functions)):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
if s in self.callbacks:
for cid, proxy in list(six.iteritems(self.callbacks[s])):
# Clean out dead references
if proxy.inst is not None and proxy.inst() is None:
del self.callbacks[s][cid]
else:
proxy(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped:
return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b:
break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
# placeholder for a proper impl later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b:
break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return repr(self)
def __getstate__(self):
# store a dictionary of this SilentList's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove:
s = s.replace(r, '')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
keys = six.iterkeys(self.__dict__)
return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k])
for k
in keys])
def unique(x):
'Return a list of unique elements of *x*'
return list(six.iterkeys(dict([(val, 1) for val in x])))
def iterable(obj):
'return true if *obj* is iterable'
try:
iter(obj)
except TypeError:
return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, six.string_types):
return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try:
obj + ''
except:
return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj):
return False
if is_string_like(obj):
return False
for o in obj:
if not is_string_like(o):
return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and six.callable(obj.write)
def file_requires_unicode(x):
"""
Returns `True` if the given writable file-like object requires Unicode
to be written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try:
obj + 1
except:
return False
else:
return True
def to_filehandle(fname, flag='rU', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
import bz2
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return is_string_like(val) or not iterable(val)
def _string_to_bool(s):
if not is_string_like(s):
return s
if s == 'on':
return True
if s == 'off':
return False
raise ValueError("string argument must be either 'on' or 'off'")
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
Set the rc parameter examples.directory to the directory where we should
look, if sample_data files are stored in a location different than
default (which is 'mpl-data/sample_data` at the same level of 'matplotlib`
Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
import matplotlib
if matplotlib.rcParams['examples.directory']:
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(os.path.dirname(__file__),
"mpl-data", "sample_data")
path = os.path.join(root, fname)
if asfileobj:
if (os.path.splitext(fname)[-1].lower() in
('.csv', '.xrc', '.txt')):
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Returns a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item):
yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace:
data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i], attributename), i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, list(six.iterkeys(self)))))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc:
fc = c # Remember first letter
d = soundex_digits[ord(c) - ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
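# Worked example (illustrative): tracing the loop above for "Robert" gives the
# raw digit string '601063'; keeping the first letter, dropping zeros and
# truncating to len=4 yields the familiar code:
# >>> soundex('Robert')
# 'R163'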
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __str__(self):
return "Null()"
def __repr__(self):
return "Null()"
if six.PY3:
def __bool__(self):
return False
else:
def __nonzero__(self):
return 0
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts) + 1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try:
del d[key]
except KeyError:
pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self, size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur + 1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:] + self.data[:self.cur]
def append(self, x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = __Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __get_item__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, xrange(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen >= N:
return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' ' * len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind < Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
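# Illustrative example: wrapping a short sentence at 16 columns with a
# two-character prefix pads the continuation line to line up under the text:
# >>> wrap('# ', 'the quick brown fox', 16)
# '# the quick\n  brown fox\n'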
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
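# Illustrative example, mirroring the behaviour described in the docstring:
# the four leading spaces of the first non-blank line set the amount stripped
# from every subsequent line.
# >>> dedent('\n    Hello\n      World')
# 'Hello\n  World'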
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
def get_recursive_filelist(args):
"""
Recurse all the files and dirs in *args* ignoring symbolic links
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start + num]
if not len(item):
break
yield item
start += num
def exception_to_str(s=None):
if six.PY3:
sh = io.StringIO()
else:
sh = io.BytesIO()
if s is not None:
print(s, file=sh)
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq) < 2:
return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val:
return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if not val:
return False
return True
def onetrue(seq):
"""
Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq):
return False
for val in seq:
if val:
return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [(s, f) for i, f in enumerate(x) for s in x[i + 1:]]
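# Illustrative example: note that within each pair the later element of the
# sequence comes first.
# >>> allpairs([1, 2, 3])
# [(2, 1), (3, 1), (3, 2)]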
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain size, just __setitem__, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
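# Illustrative example: with maxsize=2 the oldest key is evicted on the third
# insertion.
# >>> d = maxdict(2)
# >>> d['a'] = 1; d['b'] = 2; d['c'] = 3   # inserting 'c' evicts 'a'
# >>> sorted(d)
# ['b', 'c']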
class Stack(object):
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return self._elements.__len__()
def __getitem__(self, ind):
return self._elements.__getitem__(ind)
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos < N - 1:
self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos + 1]
self._elements.append(o)
self._pos = len(self._elements) - 1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements) == 0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso == o:
continue
else:
self.push(thiso)
def popall(seq):
'empty a list'
for i in xrange(len(seq)):
seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name, name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o)
if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match) >= 0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v, k) for k, v in six.iteritems(d)])
def restrict_dict(d, keys):
"""
Return a dictionary that contains those keys that appear in both
d and keys, with values from d.
"""
return dict([(k, v) for (k, v) in six.iteritems(d) if k in keys])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
from matplotlib.compat.subprocess import Popen, PIPE
pid = os.getpid()
if sys.platform == 'sunos5':
try:
a2 = Popen('ps -p %d -o osz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Sun OS only if "
"the 'ps' program is found")
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
try:
a2 = Popen('ps -p %d -o rss,sz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Linux only if "
"the 'ps' program is found")
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
try:
a2 = Popen('ps -p %d -o rss,vsz' % pid, shell=True,
stdout=PIPE).stdout.readlines()
except OSError:
raise NotImplementedError(
"report_memory works on Mac OS only if "
"the 'ps' program is found")
mem = int(a2[1].split()[0])
elif sys.platform.startswith('win'):
try:
a2 = Popen(["tasklist", "/nh", "/fi", "pid eq %d" % pid],
stdout=PIPE).stdout.read()
except OSError:
raise NotImplementedError(
"report_memory works on Windows only if "
"the 'tasklist' program is found")
mem = int(a2.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
def safe_masked_invalid(x):
x = np.asanyarray(x)
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n / segments)
ii = list(xrange(0, n, dn))
ii[-1] = n - 1
print()
print('memory report: i, mem, dmem, dmem/nloops')
print(0, self._mem[0])
for i in range(1, len(ii)):
di = ii[i] - ii[i - 1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i - 1]]
print('%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di)))
if self._overflow:
print("Warning: array size was too small for the number of calls.")
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from .pylab import figure
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in six.iteritems(step):
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token:
pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in six.itervalues(self._mapping):
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in six.itervalues(self._mapping):
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
if steps == 1:
return a
steps = int(np.floor(steps))
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1:]
delta = ((a1 - a0) / steps)
for i in range(1, steps):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
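# Illustrative usage sketch (editor's addition, not part of the original
# module): densify a short polyline with simple_linear_interpolation.
# Only numpy is assumed; the helper above does the actual work.
def _demo_simple_linear_interpolation():
    import numpy as np
    poly = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]])
    dense = simple_linear_interpolation(poly, 2)
    # Each original segment gains one intermediate point: 3 -> 5 vertices.
    assert dense.shape == (5, 2)
    # The inserted points are the segment midpoints.
    assert np.allclose(dense[1], [0.5, 1.0])
    return dense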
def recursive_remove(path):
if os.path.isdir(path):
for fname in (glob.glob(os.path.join(path, '*')) +
glob.glob(os.path.join(path, '.*'))):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
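# Illustrative usage sketch (editor's addition, not part of the original
# module): delete_masked_points drops every row that is masked or
# non-finite in *any* of the same-length arguments.
def _demo_delete_masked_points():
    import numpy as np
    import numpy.ma as ma
    x = ma.array([0.0, 1.0, 2.0, 3.0], mask=[0, 1, 0, 0])
    y = np.array([10.0, 11.0, np.nan, 13.0])
    xg, yg = delete_masked_points(x, y)
    # Index 1 is masked in x and index 2 is NaN in y, so two rows survive.
    assert list(xg) == [0.0, 3.0] and list(yg) == [10.0, 13.0]
    return xg, yg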
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None):
'''
    Returns a list of dictionaries of statistics used to draw a series of box
    and whisker plots. See the `Returns` section below for the required keys
    of the dictionary. Users can skip this function and pass a user-defined
    set of dictionaries to the new `axes.bxp` method instead of relying on
    MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or fewer
dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers past the first and
        third quartiles (e.g., Q3 + whis*IQR, IQR = interquartile range, Q3-Q1).
Beyond the whiskers, data are considered outliers and are plotted as
individual points. Set this to an unreasonably high value to force the
whiskers to show the min and max data. Alternatively, set this to an
ascending sequence of percentile (e.g., [5, 95]) to set the whiskers
        at specific percentiles of the data. Finally, `whis` can be the
string 'range' to force the whiskers to the min and max of the data.
In the edge case that the 25th and 75th percentiles are equivalent,
`whis` will be automatically set to 'range'
bootstrap : int or None (default)
Number of times the confidence intervals around the median should
be bootstrapped (percentile method).
labels : sequence
Labels for each dataset. Length must be compatible with dimensions
of `X`
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
        ======== ===================================
        Key      Value Description
        ======== ===================================
        label    tick label for the boxplot
        mean     arithmetic mean value
        med      50th percentile
        q1       first quartile (25th percentile)
        q3       third quartile (75th percentile)
        cilo     lower notch around the median
        cihi     upper notch around the median
        whislo   end of the lower whisker
        whishi   end of the upper whisker
        fliers   outliers
        ======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-based
asymptotic approximation:
.. math::
\mathrm{med} \pm 1.57 \\times \\frac{\mathrm{iqr}}{\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
'''
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
ii = np.random.randint(M, size=(N, M))
        bsData = data[ii]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X)
ncols = len(X)
if labels is None:
labels = repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels), start=0):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksyness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
if len(x) == 0:
stats['fliers'] = np.array([])
stats['mean'] = np.nan
stats['med'] = np.nan
stats['q1'] = np.nan
stats['q3'] = np.nan
stats['cilo'] = np.nan
stats['cihi'] = np.nan
stats['whislo'] = np.nan
stats['whishi'] = np.nan
continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
whismsg = ('whis must be a float, valid string, or '
'list of percentiles')
raise ValueError(whismsg)
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = np.compress(x <= hival, x)
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = np.compress(x >= loval, x)
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
np.compress(x < stats['whislo'], x),
np.compress(x > stats['whishi'], x)
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
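# Illustrative usage sketch (editor's addition, not part of the original
# module): boxplot_stats builds the per-column dictionaries consumed by
# Axes.bxp; here we only sanity-check the keys for one synthetic sample.
def _demo_boxplot_stats():
    import numpy as np
    rng = np.random.RandomState(0)
    stats = boxplot_stats(rng.normal(size=100), labels=['sample'])[0]
    assert stats['label'] == 'sample'
    assert stats['q1'] <= stats['med'] <= stats['q3']
    # By construction the whiskers never lie inside the box.
    assert stats['whislo'] <= stats['q1'] and stats['whishi'] >= stats['q3']
    return stats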
# FIXME I don't think this is used anywhere
def unmasked_index_ranges(mask, compressed=True):
"""
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
"""
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def align_iterators(func, *iterables):
"""
This generator takes a bunch of iterables that are ordered by func
It sends out ordered tuples::
(func(row), [rows from all iterators matching func(row)])
It is used by :func:`matplotlib.mlab.recs_join` to join record arrays
"""
class myiter:
def __init__(self, it):
self.it = it
self.key = self.value = None
self.iternext()
def iternext(self):
try:
self.value = next(self.it)
self.key = func(self.value)
except StopIteration:
self.value = self.key = None
def __call__(self, key):
retval = None
if key == self.key:
retval = self.value
self.iternext()
elif self.key and key > self.key:
raise ValueError("Iterator has been left behind")
return retval
# This can be made more efficient by not computing the minimum key for each
# iteration
iters = [myiter(it) for it in iterables]
minvals = minkey = True
while 1:
minvals = ([_f for _f in [it.key for it in iters] if _f])
if minvals:
minkey = min(minvals)
yield (minkey, [it(minkey) for it in iters])
else:
break
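# Illustrative usage sketch (editor's addition, not part of the original
# module): align two key-sorted record streams; rows missing from one
# stream show up as None for that iterator.
def _demo_align_iterators():
    a = [(1, 'a1'), (2, 'a2'), (4, 'a4')]
    b = [(2, 'b2'), (3, 'b3'), (4, 'b4')]
    rows = list(align_iterators(lambda row: row[0], iter(a), iter(b)))
    assert rows[0] == (1, [(1, 'a1'), None])
    assert rows[1] == (2, [(2, 'a2'), (2, 'b2')])
    return rows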
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
    # If so, treat it as math text.
try:
s = six.text_type(s)
except UnicodeDecodeError:
raise ValueError(
"matplotlib display text must have all code points < 128 or use "
"Unicode strings")
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
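# Illustrative usage sketch (editor's addition, not part of the original
# module): only an even, positive number of unescaped dollar signs makes
# a string count as math text.
def _demo_is_math_text():
    assert is_math_text(r'$\alpha > \beta$')
    assert not is_math_text(r'price: \$5')
    assert not is_math_text('plain text')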
def _reshape_2D(X):
"""
Converts a non-empty list or an ndarray of two or fewer dimensions
into a list of iterable objects so that in
for v in _reshape_2D(X):
v is iterable and can be used to instantiate a 1D array.
"""
if hasattr(X, 'shape'):
# one item
if len(X.shape) == 1:
if hasattr(X[0], 'shape'):
X = list(X)
else:
X = [X, ]
# several items
elif len(X.shape) == 2:
nrows, ncols = X.shape
if nrows == 1:
X = [X]
elif ncols == 1:
X = [X.ravel()]
else:
X = [X[:, i] for i in xrange(ncols)]
else:
raise ValueError("input `X` must have 2 or fewer dimensions")
if not hasattr(X[0], '__len__'):
X = [X]
else:
X = [np.ravel(x) for x in X]
return X
def violin_stats(X, method, points=100):
'''
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
    of dictionaries to the `axes.violin` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
'''
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X)
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
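# Illustrative usage sketch (editor's addition, not part of the original
# module): the `method` argument is any callable of the form
# method(values, coords); scipy's gaussian_kde is assumed to be available
# purely for the sake of this example.
def _demo_violin_stats():
    import numpy as np
    from scipy.stats import gaussian_kde
    rng = np.random.RandomState(0)
    data = rng.normal(size=(200, 2))  # two columns -> two violins
    def kde_method(values, coords):
        return gaussian_kde(values).evaluate(coords)
    stats = violin_stats(data, kde_method, points=50)
    assert len(stats) == 2
    assert len(stats[0]['coords']) == 50 and len(stats[0]['vals']) == 50
    return stats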
class _NestedClassGetter(object):
# recipe from http://stackoverflow.com/a/11493777/741316
"""
When called with the containing class as the first argument,
and the name of the nested class as the second argument,
returns an instance of the nested class.
"""
def __call__(self, containing_class, class_name):
nested_class = getattr(containing_class, class_name)
# make an instance of a simple object (this one will do), for which we
# can change the __class__ later on.
nested_instance = _NestedClassGetter()
# set the class of the instance, the __init__ will never be called on
# the class but the original state will be set later on by pickle.
nested_instance.__class__ = nested_class
return nested_instance
class _InstanceMethodPickler(object):
"""
Pickle cannot handle instancemethod saving. _InstanceMethodPickler
provides a solution to this.
"""
def __init__(self, instancemethod):
"""Takes an instancemethod as its only argument."""
if six.PY3:
self.parent_obj = instancemethod.__self__
self.instancemethod_name = instancemethod.__func__.__name__
else:
self.parent_obj = instancemethod.im_self
self.instancemethod_name = instancemethod.im_func.__name__
def get_instancemethod(self):
return getattr(self.parent_obj, self.instancemethod_name)
# Numpy > 1.6.x deprecates putmask in favor of the new copyto.
# So long as we support versions 1.6.x and less, we need the
# following local version of putmask. We choose to make a
# local version of putmask rather than of copyto because the
# latter includes more functionality than the former. Therefore
# it is easy to make a local version that gives full putmask
# behavior, but duplicating the full copyto behavior would be
# more difficult.
try:
np.copyto
except AttributeError:
_putmask = np.putmask
else:
def _putmask(a, mask, values):
return np.copyto(a, values, where=mask)
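# Illustrative sketch (editor's addition, not part of the original module):
# whichever branch above defined _putmask, it fills the selected positions
# of `a` in place from `values`.
def _demo_putmask():
    import numpy as np
    a = np.arange(5, dtype=float)
    _putmask(a, a > 2, -1.0)
    assert list(a) == [0.0, 1.0, 2.0, -1.0, -1.0]
    return a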
|
mit
|
basnijholt/holoviews
|
holoviews/tests/plotting/matplotlib/testelementplot.py
|
2
|
6569
|
import numpy as np
from holoviews.core.spaces import DynamicMap
from holoviews.element import Image, Curve, Scatter, Scatter3D
from holoviews.streams import Stream
from .testplot import TestMPLPlot, mpl_renderer
try:
from matplotlib.ticker import FormatStrFormatter, FuncFormatter, PercentFormatter
except:
pass
class TestElementPlot(TestMPLPlot):
def test_stream_cleanup(self):
stream = Stream.define(str('Test'), test=1)()
dmap = DynamicMap(lambda test: Curve([]), streams=[stream])
plot = mpl_renderer.get_plot(dmap)
self.assertTrue(bool(stream._subscribers))
plot.cleanup()
self.assertFalse(bool(stream._subscribers))
def test_element_xlabel(self):
element = Curve(range(10)).options(xlabel='custom x-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_xlabel(), 'custom x-label')
def test_element_ylabel(self):
element = Curve(range(10)).options(ylabel='custom y-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_ylabel(), 'custom y-label')
def test_element_xformatter_string(self):
curve = Curve(range(10)).options(xformatter='%d')
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FormatStrFormatter)
self.assertEqual(xformatter.fmt, '%d')
def test_element_yformatter_string(self):
curve = Curve(range(10)).options(yformatter='%d')
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FormatStrFormatter)
self.assertEqual(yformatter.fmt, '%d')
def test_element_zformatter_string(self):
curve = Scatter3D([]).options(zformatter='%d')
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FormatStrFormatter)
self.assertEqual(zformatter.fmt, '%d')
def test_element_xformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FuncFormatter)
def test_element_yformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FuncFormatter)
def test_element_zformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FuncFormatter)
def test_element_xformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIs(xformatter, formatter)
def test_element_yformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIs(yformatter, formatter)
def test_element_zformatter_instance(self):
formatter = PercentFormatter()
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIs(zformatter, formatter)
class TestColorbarPlot(TestMPLPlot):
def test_colormapper_unsigned_int(self):
img = Image(np.array([[1, 1, 1, 2], [2, 2, 3, 4]]).astype('uint16'))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (1, 4))
def test_colormapper_symmetric(self):
img = Image(np.array([[0, 1], [2, 3]])).options(symmetric=True)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (-3, 3))
def test_colormapper_clims(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clims=(0, 4))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (0, 4))
def test_colormapper_color_levels(self):
img = Image(np.array([[0, 1], [2, 3]])).options(color_levels=5)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(len(artist.cmap.colors), 5)
def test_colormapper_transparent_nan(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'NaN': 'transparent'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_bad, (1.0, 1.0, 1.0, 0))
def test_colormapper_min_max_colors(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'min': 'red', 'max': 'blue'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_under, (1.0, 0, 0, 1))
self.assertEqual(cmap._rgba_over, (0, 0, 1.0, 1))
def test_colorbar_label(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "c"]).options(color_index=2, colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'c')
def test_colorbar_label_style_mapping(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "color"]).options(color='color', colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'color')
|
bsd-3-clause
|
assamite/creamas
|
examples/spiro/spiro_agent.py
|
1
|
29872
|
"""
.. py:module:: spiro_agent
:platform: Unix
Agent that creates spirographs and evaluates them by their novelty as explained
in:
Linkola, S., Takala, T., and Toivonen, H. 2016. Novelty-Seeking Multi-Agent
Systems. In The Proceedings of The Seventh International Conference on
Computational Creativity (ICCC2016), 1-8. Paris, France. Sony CSL Paris,
France.
.. note::
This example is research code and provided as is. Therefore, the quality
of the code and its documentation is insufficient.
"""
import os
import sys
import time
from collections import Counter
import functools
import logging
import operator
from random import shuffle
import numpy as np
from scipy import ndimage, misc
from creamas import Artifact
from creamas.math import gaus_pdf
from creamas.vote import VoteAgent, VoteEnvironment, VoteOrganizer, vote_mean
from spiro import give_dots, give_dots_yield, spiro_image
class SpiroAgent(VoteAgent):
"""Agent that creates spirographs and evaluates them with short term memory
(``STMemory``) learned from previously seen spirographs.
"""
def __init__(self, environment, desired_novelty, search_width=10,
img_size=32, log_folder=None, log_level=logging.DEBUG,
memsize=36, learning_method='closest', learning_amount=3,
learn_on_add=True, veto_threshold=0.10,
critic_threshold=0.10, jump='none', move_radius=10.0):
"""
:param environment:
The environment for the agent.
:param desired_novelty:
Agent's desired novelty, if maximizing novelty use -1.
:param search_width:
The number of new spirographs agent creates per simulation
iteration. Defaults to 10.
:param img_size:
Preferred side length for the generated spirograph images. Defaults
to 32.
:param log_folder:
Logging folder for the agent, if not given the logging folder is
generated via standard means.
:param log_level:
Logging level for the agent. Defaults to DEBUG.
:param memsize:
Size of the agent's short term memory. Defaults to 36
:param learning_method:
Method for agent to learn from the artifacts already in the domain.
            Should be one of the following: 'closest', 'random' or 'none'.
Defaults to 'closest'.
:param learning_amount:
The number of the domain artifacts learned per iterations. Defaults
to 3.
:param learn_on_add:
Learn new domain artifacts when they are added. Defaults to 'True'.
:param veto_threshold:
Threshold by which the agent rejects artifacts generated by other
agents. Should be a value in [0, 1], values that perform well are
in [0.06, 0.16]. Defaults to 0.10.
:param critic_threshold:
Threshold by which the agent rejects its own artifacts. Should be
a value in [0, 1], values that perform well are in [0.06, 0.16].
Defaults to 0.10.
:param jump:
Jump to a location of other agent's artifact if agent itself has
not been able to generate artifact that passed its own
``critic_threshold`` in the last iteration. Should be either
'random' or 'none'. Defaults to 'none'.
:param move_radius:
The standard deviation for agent's movement, i.e. from how large
area new parameters for the spirograph generation are considered
given the agent's current position in the parameter space. Defaults
to 10.0.
"""
# Call first the constructor of the super class
super().__init__(environment, log_folder=log_folder,
log_level=log_level)
self.name = "{}_N{}".format(self.name, desired_novelty)
self.spiro_args = np.random.uniform(-199, 199, [2,])
# How many spirographs are generated to find the best one per iteration.
self.search_width = search_width
self.teaching_iterations = 1
self.img_size = img_size
self.desired_novelty = desired_novelty
#init_func = functools.partial(np.random.normal, 0.9, 0.4)
#self.stmem = ImageSOM(6, 6, self.img_size**2, init_func, coef=0.01)
self.stmem = STMemory(length=memsize)
self.env_learn_on_add = learn_on_add
self.env_learning_method = learning_method
self.env_learning_amount = learning_amount
self._save_images = False
self._novelty_threshold = veto_threshold
self._own_threshold = critic_threshold
self.added_last = False
self.jump = jump
self.move_radius = move_radius
self.arg_history = []
self.age = 0
def create(self, r, r_, R=200):
"""Create new spirograph image with given arguments. Returned image is
scaled to agent's preferred image size.
"""
x, y = give_dots(R, r, r_, spins=20)
xy = np.array([x, y]).T
xy = np.array(np.around(xy), dtype=np.int64)
xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
(xy[:, 0] < 250) & (xy[:, 1] < 250)]
xy = xy + 250
img = np.ones([500, 500], dtype=np.uint8)
img[:] = 255
img[xy[:, 0], xy[:, 1]] = 0
img = misc.imresize(img, [self.img_size, self.img_size])
fimg = img / 255.0
return fimg
def randomize_args(self):
"""Get new parameters for spirograph generation near agent's current
location (*spiro_args*).
"""
args = self.spiro_args + np.random.normal(0, self.move_radius,
self.spiro_args.shape)
np.clip(args, -199, 199, args)
while args[0] == 0 or args[1] == 0:
args = self.spiro_args + np.random.normal(0, self.move_radius,
self.spiro_args.shape)
np.clip(args, -199, 199, args)
return args
def hedonic_value(self, novelty):
"""Given the agent's desired novelty, how good the novelty value is.
Not used if *desired_novelty*=-1
"""
lmax = gaus_pdf(self.desired_novelty, self.desired_novelty, 4)
pdf = gaus_pdf(novelty, self.desired_novelty, 4)
return pdf / lmax
def novelty(self, img):
"""Image's distance to the agent's short-term memory. Usually distance
to the closest object/prototypical object model in the memory.
"""
dist = self.stmem.distance(img.flatten())
return dist
def evaluate(self, artifact):
"""Evaluate the artifact with respect to the agents short term memory.
Returns value in [0, 1].
"""
        if self.desired_novelty > 0:
            # Return (value, framing) so callers can always unpack two values.
            return self.hedonic_value(self.novelty(artifact.obj)), None
return self.novelty(artifact.obj) / self.img_size, None
def invent(self, n):
"""Invent new spirograph by taking n random steps from current position
(spirograph generation parameters) and selecting the best one based
on the agent's evaluation (hedonic function).
:param int n: how many spirographs are created for evaluation
:returns: Best created artifact.
:rtype: :py:class:`~creamas.core.agent.Artifact`
"""
args = self.randomize_args()
img = self.create(args[0], args[1])
best_artifact = SpiroArtifact(self, img, domain='image')
ev, _ = self.evaluate(best_artifact)
best_artifact.add_eval(self, ev, fr={'args': args})
for i in range(n-1):
args = self.randomize_args()
img = self.create(args[0], args[1])
artifact = SpiroArtifact(self, img, domain='image')
ev, _ = self.evaluate(artifact)
artifact.add_eval(self, ev, fr={'args': args})
if ev > best_artifact.evals[self.name]:
best_artifact = artifact
self.spiro_args = best_artifact.framings[self.name]['args']
best_artifact.in_domain = False
best_artifact.self_criticism = 'reject'
best_artifact.creation_time = self.age
return best_artifact
async def act(self):
"""Agent's main method to create new spirographs.
See Simulation and CreativeAgent documentation for details.
"""
# Learn from domain artifacts.
self.age += 1
self.added_last = False
self.learn_from_domain(method=self.env_learning_method,
amount=self.env_learning_amount)
# Invent new artifact
artifact = self.invent(self.search_width)
args = artifact.framings[self.name]['args']
val = artifact.evals[self.name]
self._log(logging.DEBUG, "Created spirograph with args={}, val={}"
.format(args, val))
self.spiro_args = args
self.arg_history.append(self.spiro_args)
self.add_artifact(artifact)
if val >= self._own_threshold:
artifact.self_criticism = 'pass'
# Train SOM with the invented artifact
self.learn(artifact, self.teaching_iterations)
# Save images if logger is defined
# Add created artifact to voting candidates in the environment
self.add_candidate(artifact)
self.added_last = True
elif self.jump == 'random':
largs = self.spiro_args
self.spiro_args = np.random.uniform(-199, 199,
self.spiro_args.shape)
self._log(logging.DEBUG, "Jumped from {} to {}"
.format(largs, self.spiro_args))
self.save_images(artifact)
def learn_from_domain(self, method='random', amount=10):
"""Learn SOM from artifacts introduced to the environment.
:param str method:
learning method, should be either 'random' or 'closest', where
            'random' chooses **amount** random artifacts, and 'closest' picks
            the artifacts whose spirograph generation parameters are closest
            to the agent's current parameters.
:param int amount:
Maximum amount of artifacts sampled
"""
if method == 'none':
return
arts = self.env.artifacts
if len(arts) == 0:
return
if 'random' in method:
samples = min(len(arts), amount)
ars = np.random.choice(arts, samples, replace=False)
for a in ars:
self.learn(a, self.teaching_iterations)
if 'closest' in method:
ars = arts
dists = []
for a in ars:
args = a.framings[a.creator]['args']
d = np.sqrt(np.sum(np.square(args - self.spiro_args)))
dists.append((d,a))
dists.sort(key=operator.itemgetter(0))
for d,a in dists[:amount]:
self.learn(a, self.teaching_iterations)
def learn(self, spiro, iterations=1):
"""Train short term memory with given spirograph.
:param spiro:
:py:class:`SpiroArtifact` object
"""
for i in range(iterations):
self.stmem.train_cycle(spiro.obj.flatten())
async def domain_artifact_added(self, spiro, iterations=1):
if spiro.creator == self.name:
for a in self.A:
if a == spiro:
a.in_domain = True
self.save_images(a)
if self.env_learn_on_add:
self.learn(spiro)
def validate(self, candidates):
besteval = 0.0
bestcand = None
valid = []
for c in candidates:
if c.creator != self.name:
ceval, _ = self.evaluate(c)
if ceval >= self._novelty_threshold:
valid.append(c)
if ceval > besteval:
besteval = ceval
bestcand = c
else:
valid.append(c)
if self.jump == 'best':
if bestcand is not None and not self.added_last:
largs = self.spiro_args
self.spiro_args = bestcand.framings[bestcand.creator]['args']
self._log(logging.INFO,
"Jumped from {} to {}".format(largs, self.spiro_args))
return valid
def save_images(self, artifact):
if not self._save_images:
return
img = artifact.obj
sc = artifact.self_criticism
domain = artifact.in_domain
ctime = artifact.creation_time
if self.logger is not None:
im_name = '{}_N{}_{:0>4}_sc={}_d={}.png'.format(self.sanitized_name(), self.desired_novelty,
ctime, sc, domain)
path = os.path.join(self.logger.folder, im_name)
misc.imsave(path, img)
def _artifact_distances(self):
accepted = [a for a in self.A if a.self_criticism == 'pass']
accepted = sorted(accepted, key=lambda x: x.creation_time)
distances = []
indeces = []
for i,a1 in enumerate(accepted[1:]):
spiro1 = a1.obj
j = i+1
mdist = np.sqrt(spiro1.flatten().shape[0])
for a2 in accepted[:j]:
spiro2 = a2.obj
dist = np.sqrt(np.sum(np.square(spiro1.flatten() - spiro2.flatten())))
if dist < mdist:
mdist = dist
distances.append(mdist)
indeces.append(i)
mean_dist = np.mean(distances)
return mean_dist, distances, indeces
def plot_distances(self, mean_dist, distances, indeces):
"""Plot distances of the generated spirographs w.r.t. the previously
generated spirogaphs.
"""
from matplotlib import pyplot as plt
x = np.arange(len(distances))
y = [mean_dist for i in x]
fig, ax = plt.subplots()
data_line = ax.plot(indeces, distances, label='Min Distance to previous',
marker='.', color='black', linestyle="")
mean_line = ax.plot(indeces, y, label='Mean', linestyle='--', color='green')
if len(distances) > 0:
z = np.poly1d(np.polyfit(x,distances,2))
f = [z(i) for i in x]
mean_line = ax.plot(indeces, f, label='Fitted', linestyle='-', color='red')
legend = ax.legend(loc='upper right', prop={'size':8})
agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
self.sanitized_name(), self.age, self.env_learning_method, self.env_learning_amount, self.env_learn_on_add,
self.stmem.length, self._novelty_threshold, self._own_threshold,
self.jump, self.search_width, self.move_radius)
ax.set_title("{} min distances: env_learn={} {}"
.format(self.name, self.env_learning_method,
self.env_learning_amount))
ax.set_ylabel('min distance to preceding artifact')
ax.set_xlabel('iteration')
if self.logger is not None:
imname = os.path.join(self.logger.folder, '{}_dists.png'.format(agent_vars))
plt.savefig(imname)
plt.close()
else:
plt.show()
def plot_places(self):
"""Plot places where the agent has been and generated a spirograph.
"""
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
x = []
y = []
if len(self.arg_history) > 1:
xs = []
ys = []
for p in self.arg_history:
xs.append(p[0])
ys.append(p[1])
ax.plot(xs, ys, color=(0.0, 0.0, 1.0, 0.1))
for a in self.A:
if a.self_criticism == 'pass':
args = a.framings[a.creator]['args']
x.append(args[0])
y.append(args[1])
sc = ax.scatter(x, y, marker="x", color='red')
ax.set_xlim([-200, 200])
ax.set_ylim([-200, 200])
agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
self.sanitized_name(), self.age, self.env_learning_method, self.env_learning_amount, self.env_learn_on_add,
self.stmem.length, self._novelty_threshold, self._own_threshold,
self.jump, self.search_width, self.move_radius)
if self.logger is not None:
imname = os.path.join(self.logger.folder, '{}.png'.format(agent_vars))
plt.savefig(imname)
plt.close()
fname = os.path.join(self.logger.folder, '{}.txt'.format(agent_vars))
with open(fname, "w") as f:
f.write(" ".join([str(e) for e in xs]))
f.write("\n")
f.write(" ".join([str(e) for e in ys]))
f.write("\n")
f.write(" ".join([str(e) for e in x]))
f.write("\n")
f.write(" ".join([str(e) for e in y]))
f.write("\n")
else:
plt.show()
def close(self, folder):
mean_dist, dists, indeces = self._artifact_distances()
if len(dists) == 0:
mean_dist = 0.0
self._log(logging.INFO, "Mean of distances: {}".format(mean_dist))
self.plot_distances(mean_dist, dists, indeces)
self.plot_places()
return mean_dist
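# Illustrative sketch (editor's addition, not used by the agents): the
# hedonic value in SpiroAgent.hedonic_value is a Gaussian in the novelty,
# normalized to 1.0 at the desired novelty. This standalone mirror of that
# formula (own Gaussian pdf, std=4 as above) shows the peak and the
# symmetric fall-off without needing an agent or an environment instance.
def _demo_hedonic_value(desired_novelty=10.0):
    import numpy as np
    def _gaus_pdf(x, mean, std):
        return np.exp(-0.5 * ((x - mean) / std) ** 2) / (std * np.sqrt(2 * np.pi))
    lmax = _gaus_pdf(desired_novelty, desired_novelty, 4)
    vals = [_gaus_pdf(n, desired_novelty, 4) / lmax for n in (6.0, 10.0, 14.0)]
    assert abs(vals[1] - 1.0) < 1e-12      # peak exactly at the desired novelty
    assert abs(vals[0] - vals[2]) < 1e-12  # symmetric fall-off around it
    return vals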
class SpiroArtifact(Artifact):
"""Artifact class for Spirographs.
"""
def __str__(self):
return "Spirograph by: {} {}".format(self.creator,
self.framings[self.creator])
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return str(self) < str(other)
def __eq__(self, other):
return hash(str(self)) == hash(str(other))
def __hash__(self):
return hash(str(self))
class SpiroEnvironment(VoteEnvironment):
"""Environment for agents creating spirographs.
"""
def __init__(self, base_url, clock, connect_kwargs):
super().__init__(base_url, clock, connect_kwargs)
self.save_image_number = 1
self.img_size = 32
self.age = 0
self.voting_method = vote_mean
self.valid_cand = []
self.suggested_cand = []
logger = logging.getLogger('creamas.spiro.vo')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
self.vote_organizer = VoteOrganizer(self, logger=logger)
def vote_and_save_info(self, age):
self.age = age
self.vote_organizer.gather_candidates()
self.suggested_cand.append(len(self.vote_organizer.candidates))
self.vote_organizer.validate_candidates()
self.valid_cand.append(len(self.vote_organizer.candidates))
self.vote_organizer.gather_votes()
artifacts = self.vote_organizer.compute_results(voting_method=self.voting_method)
threshold = 0.0
for a,v in artifacts:
accepted = True if v >= threshold else False
a.accepted = accepted
self.add_artifact(a)
for agent in self.get_agents(addr=False):
agent.domain_artifact_added(a)
self.vote_organizer.clear_candidates()
self.valid_candidates = []
def _calc_distances(self):
accepted_x = []
accepted_y = []
rejected_x = []
rejected_y = []
sort_arts = sorted(self.artifacts, key=lambda x: x.env_time)
distances = []
for i,a1 in enumerate(sort_arts[1:]):
spiro1 = a1.obj
i = i+1
mdist = np.sqrt(spiro1.flatten().shape[0])
for a2 in sort_arts[:i]:
spiro2 = a2.obj
dist = np.sqrt(np.sum(np.square(spiro1.flatten() - spiro2.flatten())))
if dist < mdist:
mdist = dist
if a1.accepted:
accepted_x.append(a1.env_time)
accepted_y.append(mdist)
else:
rejected_x.append(a1.env_time)
rejected_y.append(mdist)
mean_dist = np.mean(accepted_y)
self._log(logging.INFO, "Mean of (accepted) distances: {}".format(mean_dist))
return mean_dist, (accepted_x, accepted_y), (rejected_x, rejected_y)
def save_info(self, folder, ameans):
mean_dist, accs, rejs = self._calc_distances()
fitted_curve = None
axs, adists = accs
if len(axs) > 0:
fitted_curve = np.poly1d(np.polyfit(axs, adists, 2))
self.plot_distances(ameans, accs, rejs, fitted_curve)
self.plot_creators()
self.plot_places()
mean_sug_cand = np.mean(self.suggested_cand)
mean_valid_cand = np.mean(self.valid_cand)
return mean_dist, accs, rejs, mean_sug_cand, mean_valid_cand
def plot_creators(self):
from matplotlib import pyplot as plt
counter = Counter([a.creator for a in self.artifacts])
ticks = np.arange(len(counter.values()))
c = list(counter.items())
c.sort(key=operator.itemgetter(0))
labels = [e[0] for e in c]
counts = [e[1] for e in c]
fig, ax = plt.subplots()
rects1 = ax.bar(ticks, counts, color='green')
ax.set_ylabel('env artifacts')
ax.set_title('Number of environment artifacts per agent')
ax.set_xticks(ticks + 0.5)
ax.set_xticklabels(labels)
plt.xticks(rotation=90)
if self.logger is not None:
imname = os.path.join(self.logger.folder, 'env_a#_a{}_i{}_v{}'
.format(len(self.get_agents()), self.age,
self.voting_method))
plt.savefig(imname)
plt.close()
else:
plt.show()
def plot_distances(self, ameans, accs, rejs, fitted_curve=None):
from matplotlib import pyplot as plt
title="Minimum distance to preceding domain artifact ({} env artifacts)".format(len(self.artifacts))
vxs = np.arange(1, self.age+1)
axs, adists = accs
rxs, rdists = rejs
mean_dist = np.mean(adists)
y = [mean_dist for i in vxs]
amin = [ameans[0] for i in vxs]
amax = [ameans[1] for i in vxs]
amean = [ameans[2] for i in vxs]
fig, ax = plt.subplots()
data_line = ax.plot(axs, adists, label='accepted artifact',
marker='.', color='black', linestyle="")
if len(rxs) > 0: # if there are rejected artifacts, plot them
no_line = ax.plot(rxs, rdists, label='rejected artifact',
marker='x', color='red', linestyle="")
mean_line = ax.plot(vxs, y, label='domain mean distance', linestyle='--', color='green')
mean_line = ax.plot(vxs, amin, label='agent min mean', linestyle='-.', color='magenta')
mean_line = ax.plot(vxs, amean, label='agent mean', linestyle='--', color='magenta')
mean_line = ax.plot(vxs, amax, label='agent max mean', linestyle=':', color='magenta')
if fitted_curve is not None:
f = [fitted_curve(i) for i in axs]
fitted_line = ax.plot(axs, f, label='Fitted', linestyle='-', color='red')
legend = ax.legend(loc='upper right', prop={'size':8})
ax.set_title(title)
ax.set_ylabel('min distance to preceding env artifact')
ax.set_xlabel('iteration')
ax2 = ax.twinx()
valid_line = ax2.plot(vxs, self.valid_cand, color='cornflowerblue',
marker="x", linestyle="")
ax2.set_ylabel('valid candidates after veto', color='cornflowerblue')
a = self.get_agents(addr=False)[0]
agent_vars = "{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_mean={}_amean={}_maxN".format(
a.env_learning_method, a.env_learning_amount, a.env_learn_on_add, a.stmem.length,
a._novelty_threshold, a._own_threshold, a.jump, a.search_width, a.move_radius,
mean_dist, ameans[2])
if self.logger is not None:
imname = os.path.join(self.logger.folder, 'env_a{}_i{}_v{}_{}.png'
.format(len(self.get_agents()), self.age,
self.voting_method, agent_vars))
plt.savefig(imname)
plt.close()
else:
plt.show()
def plot_places(self):
"""Plot places (in the parameter space) of all the generated artifacts
and the artifacts accepted to the domain.
"""
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
title = "Agent places, artifacts and env artifacts ({} env artifacts)".format(len(self.artifacts))
x = []
y = []
for a in self.get_agents(addr=False):
args = a.arg_history
x = x + [e[0] for e in args]
y = y + [e[1] for e in args]
sc = ax.scatter(x, y, marker='.', color=(0, 0, 1, 0.1), label='agent place')
x = []
y = []
for a in self.get_agents(addr=False):
arts = a.A
for ar in arts:
if ar.self_criticism == 'pass':
args = ar.framings[ar.creator]['args']
x.append(args[0])
y.append(args[1])
sc = ax.scatter(x, y, marker="x", color=(0, 0, 1, 0.3), label='agent artifact')
x = []
y = []
for a in self.artifacts:
args = a.framings[a.creator]['args']
x.append(args[0])
y.append(args[1])
sc = ax.scatter(x, y, marker="x", color='red', label='env artifact',
s=40)
ax.set_xlim([-200, 200])
ax.set_ylim([-200, 200])
ax.set_xlabel('r')
ax.set_ylabel('r_')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=10)
ax.set_title(title)
a = self.get_agents(addr=False)[0]
agent_vars = "{}{}_last={}, stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(
a.env_learning_method, a.env_learning_amount, a.env_learn_on_add, a.stmem.length,
a._novelty_threshold, a._own_threshold, a.jump, a.search_width, a.move_radius)
plt.tight_layout(rect=(0,0,0.8,1))
if self.logger is not None:
imname = os.path.join(self.logger.folder, 'arts_a{}_i{}_v{}_{}.png'
.format(len(self.get_agents()), self.age,
self.voting_method, agent_vars))
plt.savefig(imname)
plt.close()
else:
plt.show()
def destroy(self, folder):
ameans = []
for a in self.get_agents(addr=False):
md = a.close(folder=folder)
ameans.append(md)
amin = min(ameans)
amax = max(ameans)
amean = np.mean(ameans)
a = self.get_agents(addr=False)[0]
ret = self.save_info(folder, [amin, amax, amean])
agent_vars = "{}{}_last={}_veto={}_sc={}_jump={}_stmem=list{}_sw={}_mr={}_maxN".format(
a.env_learning_method, a.env_learning_amount, a.env_learn_on_add,
a._novelty_threshold, a._own_threshold, a.jump,
a.stmem.length, a.search_width, a.move_radius)
if self.logger is not None:
fname = os.path.join(self.logger.folder, 'runinfo_a{}_i{}_v{}_{}.txt'
.format(len(self.get_agents()), self.age,
self.voting_method, agent_vars))
with open(fname, "w") as f:
for e in ret:
f.write("{}\n".format(e))
f.write("amin:{}\n".format(amin))
f.write("amax:{}\n".format(amax))
f.write("amean:{}\n".format(amean))
self.shutdown()
ret = ret + ((amin,amax, amean),)
return ret
class STMemory():
"""Agent's short-term memory model using a simple list which stores
artifacts as is."""
def __init__(self, length):
self.length = length
self.artifacts = []
def _add_artifact(self, artifact):
if len(self.artifacts) == self.length:
self.artifacts = self.artifacts[:-1]
self.artifacts.insert(0, artifact)
def learn(self, artifact):
"""Learn new artifact. Removes last artifact from the memory if it is
full."""
self._add_artifact(artifact)
def train_cycle(self, artifact):
"""Train cycle method to keep the interfaces the same with the SOM
implementation of the short term memory.
"""
self.learn(artifact)
def distance(self, artifact):
mdist = np.sqrt(artifact.shape[0])
if len(self.artifacts) == 0:
return np.random.random()*mdist
for a in self.artifacts:
d = np.sqrt(np.sum(np.square(a - artifact)))
if d < mdist:
mdist = d
return mdist
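# Illustrative usage sketch (editor's addition, not used by the agents):
# STMemory keeps at most `length` artifacts, evicting the oldest one, and
# `distance` returns the Euclidean distance to the closest stored artifact.
def _demo_stmemory():
    import numpy as np
    mem = STMemory(length=2)
    mem.train_cycle(np.zeros(4))
    mem.train_cycle(np.ones(4))
    mem.train_cycle(np.full(4, 2.0))  # memory is full: the oldest is dropped
    assert len(mem.artifacts) == 2
    assert abs(mem.distance(np.ones(4)) - 0.0) < 1e-12
    return mem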
if __name__ == "__main__":
import aiomas
from creamas.core import Simulation
from matplotlib import pyplot as plt
addr = ('localhost', 5555)
log_folder = 'logs'
env = SpiroEnvironment.create(addr, codec=aiomas.MsgPack)
env.log_folder = log_folder
for e in range(10):
SpiroAgent(env, desired_novelty=-1, search_width=20,
log_folder=log_folder, log_level=logging.DEBUG)
sim = Simulation(env, log_folder=log_folder,
callback=env.vote_and_save_info)
sim.steps(10)
ret = sim.end()
print(ret)
|
gpl-2.0
|
simon-pepin/scikit-learn
|
sklearn/pipeline.py
|
162
|
21103
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
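# Illustrative sketch (editor's addition, not part of the scikit-learn API):
# fit parameters named '<step>__<param>' are split on the first '__' and
# forwarded to that step, as implemented in Pipeline._pre_transform above.
# The tiny stand-in steps below exist only for this example, so the sketch
# does not depend on any other scikit-learn module.
def _demo_pipeline_fit_param_routing():
    class AddConst(BaseEstimator, TransformerMixin):
        def fit(self, X, y=None, offset=0):
            self.offset_ = offset
            return self
        def transform(self, X):
            return [value + self.offset_ for value in X]
    class Recorder(BaseEstimator):
        def fit(self, X, y=None, flag=False):
            self.seen_, self.flag_ = list(X), flag
            return self
    pipe = Pipeline([('add', AddConst()), ('rec', Recorder())])
    pipe.fit([1, 2, 3], add__offset=10, rec__flag=True)
    assert pipe.named_steps['add'].offset_ == 10
    assert pipe.named_steps['rec'].seen_ == [11, 12, 13]
    assert pipe.named_steps['rec'].flag_ is True
    return pipe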
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
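# Worked example (added comment, not in the original source; assumes
# StandardScaler and PCA are importable from sklearn): duplicate estimator
# types get numbered suffixes while unique ones keep the bare lowercased name:
#
#   >>> _name_estimators([StandardScaler(), StandardScaler(), PCA()])
#   [('standardscaler-1', StandardScaler(...)),
#    ('standardscaler-2', StandardScaler(...)),
#    ('pca', PCA(...))]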
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
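# Illustrative sketch (added comment): when a name appears in
# transformer_weights, the transformed output is simply scaled by that factor
# before FeatureUnion stacks it, e.g. with a hypothetical fitted transformer
# `scaler` registered under the name 'scaler':
#
#   >>> _transform_one(scaler, 'scaler', X, {'scaler': 0.5})
#   ... # equivalent to 0.5 * scaler.transform(X)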
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
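# Usage sketch (added comment; names such as PCA/TruncatedSVD are assumed to
# be imported from sklearn): unlike make_union, constructing FeatureUnion
# directly allows per-transformer weighting:
#
#   >>> union = FeatureUnion([('pca', PCA(n_components=2)),
#   ...                       ('svd', TruncatedSVD(n_components=2))],
#   ...                      transformer_weights={'pca': 2.0, 'svd': 1.0})
#   >>> # union.fit_transform(X) hstacks 2.0 * PCA output and 1.0 * SVD output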
|
bsd-3-clause
|
jonyroda97/redbot-amigosprovaveis
|
lib/numpy/lib/twodim_base.py
|
13
|
25584
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
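# Worked example (added comment): _min_int returns the smallest signed integer
# type able to hold the closed range [low, high], falling back to int64:
#
#   >>> _min_int(0, 100)       # fits int8 (-128..127)
#   <class 'numpy.int8'>
#   >>> _min_int(-1, 60000)    # 60000 exceeds int16's 32767, so int32
#   <class 'numpy.int32'>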
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
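# Note on the implementation above (added comment): for k >= 0 the first one
# is written at flat position k (row 0, column k) and the stride M+1 moves one
# row down and one column right per step; restricting to m[:M-k] stops the
# diagonal before it would wrap to column 0 of the next row. For k < 0 the
# start index (-k)*M is the first element of row -k.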
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
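# Note on the flat-index arithmetic above (added comment): an element placed at
# row r, column c of the n x n result has flat position r*n + c. For k >= 0
# value i goes to (i, i+k), hence fi = i + k + i*n; for k < 0 it goes to
# (i-k, i), hence fi = i + (i-k)*n.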
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
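# Worked example (added comment, assumes numpy imported as np): the outer
# comparison marks entry (i, j) as True exactly when i >= j - k, i.e. when
# column j lies at or below the k-th diagonal. For N = M = 3 and k = 0:
#
#   >>> np.greater_equal.outer(np.arange(3), np.arange(0, 3)).astype(int)
#   array([[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]])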
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
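# Note on the construction above (added comment): the first column of `tmp` is
# set to x**0 == 1 and every later column is initialised to x, so the running
# product along axis 1 turns column j into x**j; when `increasing` is False the
# writes go through the reversed view v[:, ::-1], yielding decreasing powers.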
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
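# Usage note (added comment, assumes numpy imported as np): when `bins` is
# neither a scalar nor a pair -- e.g. a single 1-D array of edges -- the code
# above reuses those edges for both dimensions before calling histogramdd:
#
#   >>> x, y = np.random.rand(100), np.random.rand(100)
#   >>> H, xe, ye = histogram2d(x, y, bins=np.linspace(0, 1, 5))
#   >>> H.shape, xe.size, ye.size
#   ((4, 4), 5, 5)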
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
gpl-3.0
|
sarahgrogan/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
243
|
7461
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate F-test.
Second, we set alpha and compare the performance of different feature
selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
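# Illustrative sketch (added, not in the original example): for an orthogonal
# design the irrelevant block projects to zero on the relevant one, so the
# mutual incoherence is 0; correlated designs push it up, which is the regime
# explored in the loop below.
#
#   >>> X_rel = np.eye(4)[:, :2]   # two orthonormal relevant columns
#   >>> X_irr = np.eye(4)[:, 2:]   # two orthonormal irrelevant columns
#   >>> mutual_incoherence(X_rel, X_irr)
#   0.0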
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log and makes it easier
# to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
nikste/tensorflow
|
tensorflow/examples/learn/text_classification.py
|
13
|
4967
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = tf.contrib.layers.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates an embedding matrix of shape [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list ends up being a list of tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given the encoding of the RNN, take the encoding of the last step (i.e. the
# final hidden state of the network) and pass it as features to a logistic
# regression over the output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
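# Usage sketch (added comment): the script is driven by the two flags defined
# above, e.g.
#
#   python text_classification.py                        # RNN model (default)
#   python text_classification.py --bow_model            # bag-of-words model
#   python text_classification.py --test_with_fake_data  # quick smoke test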
|
apache-2.0
|
rishikksh20/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
104
|
3139
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
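# Usage sketch (added comment): the script expects the path to the movie review
# folder as its only argument; the folder name below is hypothetical.
#
#   python exercise_02_sentiment.py ./txt_sentoken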
|
bsd-3-clause
|
saiwing-yeung/scikit-learn
|
sklearn/neighbors/classification.py
|
17
|
14354
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
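# Note on KNeighborsClassifier.predict_proba above (added comment): iterating
# over pred_labels.T visits the i-th neighbor of every query row at once, so
# each pass adds that neighbor's weight into the column of its class for all
# rows; dividing by the per-row sum then turns the accumulated (possibly
# distance-weighted) votes into probabilities that sum to one.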
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
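# Usage sketch (added comment, not in the original module): with outlier_label
# set, a query point with no neighbors inside `radius` receives that label
# instead of raising, mirroring the outlier branch in `predict` above.
#
#   >>> neigh = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
#   >>> neigh.fit([[0], [1], [2], [3]], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#   RadiusNeighborsClassifier(...)
#   >>> neigh.predict([[10.0]])
#   array([-1])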
|
bsd-3-clause
|
edgarRd/incubator-airflow
|
airflow/www/views.py
|
1
|
111891
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import codecs
import copy
import datetime as dt
import itertools
import json
import logging
import math
import os
import traceback
from collections import defaultdict
from datetime import timedelta
from functools import wraps
from textwrap import dedent
import bleach
import markdown
import nvd3
import pendulum
import pkg_resources
import sqlalchemy as sqla
from flask import (
abort, jsonify, redirect, url_for, request, Markup, Response,
current_app, render_template, make_response)
from flask import flash
from flask._compat import PY2
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.contrib.sqla import ModelView
from flask_admin.form.fields import DateTimeField
from flask_admin.tools import iterdecode
from jinja2 import escape
from jinja2.sandbox import ImmutableSandboxedEnvironment
from past.builtins import basestring, unicode
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import or_, desc, and_, union_all
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_dag_run_state_to_failed)
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models import XCom, DagRun
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import json_ser
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.www import utils as wwwutils
from airflow.www.forms import (DateTimeForm, DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm)
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
UTF8_READER = codecs.getreader('utf-8')
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # Only filter by owner when authentication is enabled (i.e. login is not disabled)
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
if m.dag_id is None:
return Markup()
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id,
execution_date=m.execution_date)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def dag_run_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=m.dag_id,
run_id=m.run_id,
execution_date=m.execution_date)
return Markup('<a href="{url}">{m.run_id}</a>'.format(**locals()))
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def parse_datetime_f(value):
if not isinstance(value, dt.datetime):
return value
return timezone.make_aware(value)
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if timezone.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except Exception:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
wwwutils.get_python_source(x),
lexers.PythonLexer,
),
}
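# Usage sketch (added note, not part of the original file): renderers are looked up
# by template-field name, e.g.
#   html = attr_renderer['sql']("SELECT 1")   # returns pygments-highlighted HTML
# Fields without an entry fall back to a plain <pre><code> block (see Airflow.rendered below).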
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
    TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
    approximate the size of the generated chart (otherwise the charts are tiny and unreadable
    when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
    charts, i.e. charts whose height adapts to the size of the components within.
    """
return 600 + len(dag.tasks) * 10
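# Illustrative arithmetic for the heuristic above (added note, not part of the original
# file): a DAG with 40 tasks gets a chart height of 600 + 40 * 10 = 1000 pixels.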
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round up to the next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
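        # Illustrative (added note, not part of the original file): with dttm = 12:00:00.123,
        # base_date becomes 12:00:01, so the filter execution_date <= base_date still
        # includes the 12:00:00.123 run that millisecond truncation would otherwise drop.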
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if not isinstance(args, dict):
raise AirflowException('Not a dict')
except Exception:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
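                        # Illustrative (added note, not part of the original file): assuming
                        # a UTC system timezone, 2018-01-01T00:00:00 formats via the
                        # non-portable "%s" directive to "1514764800"; * 1000 gives
                        # 1514764800000, the millisecond epoch the JS charting code expects.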
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
                        df[df.columns[2]] = df[df.columns[2]].astype(float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
df.index = df[df.columns[0]]
                        df = df.sort_values(by=df.columns[0])
del df[df.columns[0]]
for col in df.columns:
                            df[col] = df[col].astype(float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update(
dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]
)
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
@login_required
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True) # noqa: E712
.filter(Dag.is_subdag == False) # noqa: E712
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True) # noqa: E712
.filter(Dag.is_subdag == False) # noqa: E712
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=get_hostname()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=get_hostname(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/get_logs_with_metadata')
@login_required
@wwwutils.action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
try_number = int(request.args.get('try_number'))
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
try:
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
message = logs[0]
return jsonify(message=message, metadata=metadata)
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@login_required
@wwwutils.action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
logs = [''] * (ti.next_try_number - 1 if ti is not None else 0)
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts",
dag_id=dag.dag_id, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa: E721
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer: # noqa: E721
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running.
In most cases this just means that the task will probably
be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
- The following configuration values may be limiting the number
of queueable processes:
<code>parallelism</code>,
<code>dag_concurrency</code>,
<code>max_active_dag_runs_per_dag</code>,
<code>non_pooled_task_slot_count</code><br/>
{}
<br/>
            If this task instance does not start soon, please contact your Airflow """
"""administrator for assistance."""
.format(
"- This task instance already ran and had its state changed "
"manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
from airflow.executors import GetDefaultExecutor
executor = GetDefaultExecutor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id))
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id))
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon successful delete return to origin
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as failed"),
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as success"),
details=details)
return response
@expose('/dagrun_failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_failed(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = pendulum.parse(execution_date)
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def failed(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def tree(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(DR.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
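        # Worked example (added note, not part of the original file): a DAG with 4 root
        # tasks yields node_limit = 5000 / 4 = 1250; once node_count passes that budget,
        # already-visited subtrees are no longer re-expanded by recurse_nodes below.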
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = timezone.utcnow() - pendulum.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
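            # Added note (not part of the original file): e.g. a RUNNING instance that
            # started 90 seconds ago gets tid["duration"] = 90.0, so the tree view can
            # display a live duration for tasks that have not finished yet.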
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
# minimize whitespace as this can be huge for bigger dags
data = json.dumps(data, default=json_ser, separators=(',', ':'))
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur, num_runs=num_runs)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
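        # Worked example (added note, not part of the original file): a task instance that
        # failed twice (30 s and 45 s) before succeeding in 60 s contributes 60 to the
        # duration chart and 60 + 30 + 45 = 135 to the cumulative chart.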
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$(function() {$( document ).trigger('chartload') })" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.end_date:
ts = ti.execution_date
following_schedule = dag.following_schedule(ts)
if dag.schedule_interval and following_schedule:
ts = following_schedule
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
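                    # Illustrative (added note, not part of the original file): for an
                    # hourly DAG, the run with execution_date 00:00 covers 00:00-01:00;
                    # if it finishes at 01:05 its landing time is 01:05 - 01:00 = 300 s.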
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = models.TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
tis_with_fails = sorted(tis + ti_fails, key=lambda ti: ti.start_date)
tasks = []
for ti in tis_with_fails:
end_date = ti.end_date if ti.end_date else timezone.utcnow()
state = ti.state if type(ti) == models.TaskInstance else State.FAILED
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': state,
'executionDate': ti.execution_date.isoformat(),
})
states = {task['status']: task['status'] for task in tasks}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except Exception:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
d = json.load(UTF8_READER(request.files['file']))
except Exception as e:
flash("Missing file or syntax error: {}.".format(e))
else:
suc_count = fail_count = 0
for k, v in d.items():
try:
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
except Exception as e:
logging.info('Variable import failed: {}'.format(repr(e)))
fail_count += 1
else:
suc_count += 1
flash("{} variable(s) successfully updated.".format(suc_count), 'info')
if fail_count:
flash(
"{} variables(s) failed to be updated.".format(fail_count), 'error')
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
@provide_session
def index(self, session=None):
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
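        # Worked example (added note, not part of the original file): with 125 matching
        # DAGs and a page size of 50, num_of_pages = ceil(125 / 50) = 3; page index 1
        # slices sorted_dag_ids[50:100], i.e. DAGs 51-100 of the listing.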
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
@provide_session
def query(self, session=None):
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
@provide_session
def _connection_ids(session=None):
return [
(c.conn_id, c.conn_id)
for c in (
session.query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
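# Illustrative note: assuming two connections named 'airflow_db' and
# 'my_postgres' exist, _connection_ids() returns
# [('airflow_db', 'airflow_db'), ('my_postgres', 'my_postgres')] -- the
# (value, label) pairs Flask-Admin's SelectField expects; it is used below for
# the chart 'conn_id' choices.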
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': _connection_ids()
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = timezone.utcnow()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
'filters': [
parse_datetime_f,
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
'filters': [
parse_datetime_f,
]
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
# todo: yes this has a spelling error
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnownEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
val = getattr(model, name)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val', 'is_encrypted',)
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
@provide_session
def action_varexport(self, ids, session=None):
V = models.Variable
qry = session.query(V).filter(V.id.in_(ids)).all()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
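# Illustrative sketch (not part of Airflow): the JSON document produced by
# ``action_varexport`` above. Keys are Variable keys and values are decoded
# JSON where possible, otherwise the raw string. The variable names below are
# made up for the example.
def _example_varexport_payload():
    import json
    var_dict = {'env': 'prod', 'retries': 3}  # hypothetical Variables
    return json.dumps(var_dict, sort_keys=True, indent=4)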
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
form_args = {
'execution_date': {
'filters': [
parse_datetime_f,
]
}
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
filter_converter = wwwutils.UtcFilterConverter()
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link,
run_id=dag_run_link
)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
@provide_session
def action_new_delete(self, ids, session=None):
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
@action('set_running', "Set state to 'running'", None)
@provide_session
def action_set_running(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to running".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?")
@provide_session
def action_set_failed(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?")
@provide_session
def action_set_success(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
# Called after editing DagRun model in the UI.
@provide_session
def after_model_change(self, form, dagrun, is_created, session=None):
altered_tis = []
if dagrun.state == State.SUCCESS:
altered_tis = set_dag_run_state_to_success(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
elif dagrun.state == State.FAILED:
altered_tis = set_dag_run_state_to_failed(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
elif dagrun.state == State.RUNNING:
altered_tis = set_dag_run_state_to_running(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
models.DagStat.update([dagrun.dag_id], session=session)
flash(
"1 dag run and {altered_ti_count} task instances "
"were set to '{dagrun.state}'".format(**locals()))
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date', 'extra')
filter_converter = wwwutils.UtcFilterConverter()
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
filter_converter = wwwutils.UtcFilterConverter()
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link,
run_id=dag_run_link,
duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
@action('clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task instance(s)'
' and set their dagruns to the running state?'))
def action_clear(self, ids, session=None):
try:
TI = models.TaskInstance
dag_to_task_details = {}
dag_to_tis = {}
# Collect dags upfront as dagbag.get_dag() will reset the session
for id_str in ids:
task_id, dag_id, execution_date = iterdecode(id_str)
dag = dagbag.get_dag(dag_id)
task_details = dag_to_task_details.setdefault(dag, [])
task_details.append((task_id, execution_date))
for dag, task_details in dag_to_task_details.items():
for task_id, execution_date in task_details:
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag.dag_id,
TI.execution_date == execution_date).one()
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(ids)))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = iterdecode(id)
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
# Used to customize the form; the form's elements get rendered
# and the results are stored in the extra field as JSON. All of these
# need to be prefixed with extra__ followed by the conn_type, as in
# extra__{conn_type}__name. You can also hide form elements and rename
# others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except Exception:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography # noqa F401
conf.get('core', 'fernet_key')
is_secure = True
except Exception:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
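# Illustrative sketch (not part of Airflow): what ``model.extra`` produced by
# ``ConnectionModelView.on_model_change`` above might look like for a JDBC
# connection. The driver path and class name are made-up placeholders; only
# the field names come from ``form_extra_fields`` above.
def _example_jdbc_extra_json():
    import json
    formdata = {
        'conn_type': 'jdbc',
        'extra__jdbc__drv_path': '/opt/drivers/example.jar',  # hypothetical
        'extra__jdbc__drv_clsname': 'com.example.Driver',     # hypothetical
    }
    extra_keys = ('extra__jdbc__drv_path', 'extra__jdbc__drv_clsname')
    return json.dumps({k: formdata[k] for k in extra_keys if k in formdata})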
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
|
apache-2.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_perceptron.py
|
4
|
1559
|
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
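# Illustrative sketch: one application of the update rule in MyPerceptron.fit
# above. With w = [0, 0], b = 0, x = [1, 2] and label y = +1, sign(w.x + b) is
# 0 != 1, so the sample is treated as misclassified and the update gives
# w = w + y * x = [1, 2] and b = b + y = 1.
def _example_perceptron_step():
    p = MyPerceptron(n_iter=1)
    p.fit(np.array([[1., 2.]]), np.array([1]))
    return p.w, p.b  # -> (array([1., 2.]), 1.0)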
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False, seed=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
|
agpl-3.0
|
mrkowalski/kaggle_santander
|
scikit/src/nosql/02-cross_validate.py
|
1
|
2189
|
import commons, sys, os
import logging as log
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
def save_model(clf, feature):
if not os.path.exists("models"): os.makedirs("models")
joblib.dump(clf, "models/" + feature + ".pkl")
def add_activations(df):
for ind in commons.indicators:
log.info("Adding activations for {}".format(ind))
ind_prev = ind + "_1"
res = df[ind].sub(df[ind_prev])
res[res < 0] = 0
df["act_" + ind] = res.fillna(0)
return df
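# Illustrative sketch: what add_activations computes for a single indicator.
# Given this month's flag ``ind`` and last month's flag ``ind_1``, the
# activation is 1 only when the product switched on (0 -> 1); decreases are
# clipped to 0 and missing values become 0. The column names are made up.
def _example_activation():
    toy = pd.DataFrame({'ind_x': [0, 1, 1, 0], 'ind_x_1': [0, 0, 1, 1]})
    act = toy['ind_x'].sub(toy['ind_x_1'])
    act[act < 0] = 0
    return act.fillna(0).tolist()  # -> [0, 1, 0, 0]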
chunks = 6
df = pd.DataFrame()
for n in range(1, chunks+1):
log.info('Loading dataframe...#{}'.format(n))
df = df.append(pd.read_hdf(commons.FILE_DF + "." + str(n), key='santander'))
df = add_activations(df)
df.drop(commons.indicators, inplace=True, axis=1)
df.drop(['ncodpers'], inplace=True, axis=1)
activation_columns=["act_" + i for i in commons.indicators]
for ind in commons.indicators:
log.info("Building model for {}".format(ind))
X = df.drop(activation_columns, axis = 1)
Y = df["act_" + ind]
if sum(Y) > 0:
ratio = (Y.shape[0] - sum(Y)) / sum(Y)
log.info("Negative / positive: {}".format(ratio))
# log.info("X: {}, Y: {}".format(list(X.columns.values), Y.name))
clf = xgb.XGBClassifier(objective = 'binary:logistic', nthread=8, silent=1, max_depth=6, scale_pos_weight=ratio)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
log.info("Feature importances: {}".format(sorted(zip(list(X), clf.feature_importances_), key=lambda x: x[1])))
log.info("Accuracy: {:.2%}, precision: {:.2%}, recall: {:.2%}".format(
accuracy_score(Y_test, Y_pred),
precision_score(Y_test, Y_pred),
recall_score(Y_test, Y_pred)))
save_model(clf, ind)
else:
log.info("Skipping {} due to no positive cases.".format(ind))
|
mit
|
shangwuhencc/scikit-learn
|
sklearn/metrics/scorer.py
|
211
|
13141
|
"""
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a ``predict``
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
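# Illustrative sketch (not part of the public API): the sign convention of
# make_scorer. A loss wrapped with ``greater_is_better=False`` is negated, so
# "greater is better" still holds for model selection. The constant estimator
# below exists only for the example.
def _example_loss_scorer_sign_flip():
    class _ConstantRegressor(object):
        def fit(self, X, y):
            return self
        def predict(self, X):
            return np.zeros(len(X))  # always predicts 0.0
    neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
    est = _ConstantRegressor().fit([[0.], [0.]], [1., 3.])
    return neg_mse(est, [[0.], [0.]], [1., 3.])  # -> -5.0 == -(1**2 + 3**2) / 2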
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
|
bsd-3-clause
|
fspaolo/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
12
|
1064
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=pl.cm.PuOr_r)
contours = pl.contour(xx, yy, Z, levels=[0], linewidths=2,
                      linestyles='--')
pl.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=pl.cm.Paired)
pl.xticks(())
pl.yticks(())
pl.axis([-3, 3, -3, 3])
pl.show()
|
bsd-3-clause
|
justthetips/PerformanceAnalytics
|
performanceanalytics/charts/boxplot.py
|
1
|
2111
|
# MIT License
# Copyright (c) 2017 Jacob Bourne
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib.pyplot as plt
def boxplot(data, manager_col=0, other_cols=None, **kwargs):
"""
    Create a box and whisker chart. There is not much here, but it's a nice wrapper and
    keeps all the charting consistent.
:param data: the data
:param manager_col: the manager column
:param other_cols: any other columns to display
:param kwargs: other arguments to pass to the plot
:return: the plot
"""
# prepare the data
ax_cols = [manager_col]
if other_cols is not None:
for oc in other_cols:
ax_cols.append(oc)
df1 = data[data.columns[ax_cols]]
    df1 = df1.iloc[:, ::-1]  # reverse column order (pandas .ix is deprecated/removed)
# box charts are so easy
f = plt.figure(figsize=kwargs.pop('figsize', (8, 6)))
ax = f.add_subplot(111)
ax = df1.boxplot(grid=True, vert=False)
# pretty it up a little bit
f.suptitle(kwargs.pop('title', 'Return Distribution'))
ax_t = ax.get_xticks()
ax.set_xticklabels(['{:0.1f}%'.format(x * 100) for x in ax_t])
return plt
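# Illustrative usage sketch (assumptions: pandas and matplotlib are installed
# and the DataFrame holds one column of periodic returns per manager/index):
#
#     import numpy as np
#     import pandas as pd
#     rets = pd.DataFrame(np.random.randn(60, 2) / 100.0,
#                         columns=['Manager', 'Benchmark'])
#     plot = boxplot(rets, manager_col=0, other_cols=[1],
#                    title='Return Distribution', figsize=(8, 6))
#     plot.show()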
|
mit
|
hwroitzsch/BikersLifeSaver
|
lib/python3.5/site-packages/numpy/linalg/linalg.py
|
32
|
75738
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
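# Illustrative note: the helpers above wire LAPACK failure signals to Python
# exceptions. A typical gufunc call further down looks like
#
#     extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
#     r = gufunc(a, b, signature=signature, extobj=extobj)
#
# so that, for example, a singular input raises LinAlgError("Singular matrix")
# instead of silently producing NaNs (see ``solve`` and ``inv`` below).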
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
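# Illustrative sketch of the promotion performed by _commonType: mixing a
# float32 array with a complex64 array selects cdouble as the computation type
# (the lite wrappers always compute in double precision) and csingle as the
# result type.
#
#     >>> t, result_t = _commonType(zeros(2, dtype=single),
#     ...                           zeros(2, dtype=csingle))
#     >>> t is cdouble and result_t is csingle
#     True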
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
        if t != result_t:
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
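# Sketch of the least-squares use of qr() discussed in the docstring
# above (_demo_qr_lstsq is an illustrative name only): factor A, then
# solve r x = q.T b instead of forming inv(r) explicitly.
def _demo_qr_lstsq():
    import numpy as np
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    q, r = np.linalg.qr(A)
    x = np.linalg.solve(r, q.T.dot(b))
    # lstsq() remains the recommended entry point; the two should agree.
    assert np.allclose(x, np.linalg.lstsq(A, b)[0])
    return x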
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
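# Minimal check of the defining relation dot(a, v[:, i]) == w[i] * v[:, i]
# for the pairs returned by eig() (_demo_eig is a placeholder name).
def _demo_eig():
    import numpy as np
    a = np.array([[1., -1.], [1., 1.]])
    w, v = np.linalg.eig(a)
    for i in range(len(w)):
        assert np.allclose(a.dot(v[:, i]), w[i] * v[:, i])
    return w, v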
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
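# Sketch showing that eigh() diagonalizes a Hermitian matrix, i.e.
# a == v diag(w) v.H, and that the eigenvalues come back in ascending
# order (_demo_eigh is an illustrative name only).
def _demo_eigh():
    import numpy as np
    a = np.array([[1., -2j], [2j, 5.]])
    w, v = np.linalg.eigh(a)
    assert np.allclose(v.dot(np.diag(w)).dot(v.conj().T), a)
    assert np.all(np.diff(w) >= 0)
    return w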
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
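# Sketch of a rank-k reconstruction with svd(), a common application of
# the reduced factorization (_demo_svd_lowrank is an illustrative name).
# By the Eckart-Young theorem the 2-norm error of the best rank-k
# approximation equals the first discarded singular value.
def _demo_svd_lowrank(k=2):
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(6, 4)
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    a_k = (u[:, :k] * s[:k]).dot(vt[:k])
    assert np.linalg.norm(a - a_k, 2) <= s[k] + 1e-12
    return a_k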
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
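# Sketch relating cond() with p=None to the ratio of extreme singular
# values, exactly as computed in the branch above (_demo_cond is a
# placeholder name).
def _demo_cond():
    import numpy as np
    a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    s = np.linalg.svd(a, compute_uv=False)
    assert np.allclose(np.linalg.cond(a), s[0] / s[-1])
    return np.linalg.cond(a)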
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
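# Sketch of matrix_rank() on a matrix with a column that is a linear
# combination of other columns, the rank-deficiency scenario described
# in the Notes above (_demo_matrix_rank is an illustrative name; no
# assertion is made since the result depends on the default tolerance).
def _demo_matrix_rank():
    import numpy as np
    rng = np.random.RandomState(0)
    a = rng.randn(5, 4)
    a[:, -1] = a[:, 0] + a[:, 1]     # exact linear combination
    return np.linalg.matrix_rank(a)  # expected: 3 with the default tol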
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
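# Sketch connecting pinv() to least squares: for a full-column-rank A,
# pinv(A) b matches the lstsq() solution (_demo_pinv is a placeholder
# name).
def _demo_pinv():
    import numpy as np
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    x = np.linalg.pinv(A).dot(b)
    assert np.allclose(x, np.linalg.lstsq(A, b)[0])
    return x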
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
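# Sketch of slogdet() on a matrix whose determinant underflows in det(),
# mirroring the docstring example (_demo_slogdet is an illustrative name).
def _demo_slogdet():
    import numpy as np
    a = np.eye(500) * 0.1
    sign, logdet = np.linalg.slogdet(a)
    assert np.linalg.det(a) == 0.0            # underflows to zero
    assert sign == 1.0 and np.isfinite(logdet)
    return sign, logdet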
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
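# Compact sketch of the straight-line fit from the lstsq() docstring
# (_demo_lstsq_line is a placeholder name); it recovers slope and
# intercept and checks the reported residual against a direct sum of
# squared errors.
def _demo_lstsq_line():
    import numpy as np
    x = np.array([0., 1., 2., 3.])
    y = np.array([-1., 0.2, 0.9, 2.1])
    A = np.vstack([x, np.ones_like(x)]).T
    (m, c), resid, rank, sv = np.linalg.lstsq(A, y)
    assert np.allclose(resid, np.sum((y - (m * x + c)) ** 2))
    return m, c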
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
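# Sketch of multi_dot() versus explicitly chained dot() calls, assuming a
# NumPy build that exposes numpy.linalg.multi_dot (_demo_multi_dot is a
# placeholder name).  The results agree; multi_dot merely picks the
# cheaper parenthesization automatically.
def _demo_multi_dot():
    import numpy as np
    rng = np.random.RandomState(0)
    A = rng.random_sample((10, 100))
    B = rng.random_sample((100, 5))
    C = rng.random_sample((5, 50))
    out = np.linalg.multi_dot([A, B, C])
    assert np.allclose(out, A.dot(B).dot(C))
    return out.shape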
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
|
mit
|
RPGOne/Skynet
|
pactools-master/pactools/tests/test_comodulogram.py
|
1
|
5312
|
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from pactools.dar_model import AR, DAR, HAR, StableDAR
from pactools.utils.testing import assert_equal
from pactools.utils.testing import assert_raises, assert_array_equal
from pactools.utils.testing import assert_true, assert_array_almost_equal
from pactools.comodulogram import Comodulogram
from pactools.comodulogram import ALL_PAC_METRICS, BICOHERENCE_PAC_METRICS
from pactools.simulate_pac import simulate_pac
# Parameters used for the simulated signal in the test
low_fq_range = [1., 3., 5., 7.]
high_fq_range = [25., 50., 75.]
n_low = len(low_fq_range)
n_high = len(high_fq_range)
high_fq = high_fq_range[1]
low_fq = low_fq_range[1]
n_points = 1024
fs = 200.
signal = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,
low_fq_width=1., noise_level=0.1, random_state=0)
signal_copy = signal.copy()
class ComodTest(Comodulogram):
# A comodulogram call with default params used for testing
def __init__(self, fs=fs, low_fq_range=low_fq_range, low_fq_width=1.,
high_fq_range=high_fq_range, high_fq_width='auto',
method='tort', n_surrogates=0, vmin=None, vmax=None,
progress_bar=False, ax_special=None, minimum_shift=1.0,
random_state=0, coherence_params=dict(), low_fq_width_2=4.0):
super(ComodTest, self).__init__(
fs=fs, low_fq_range=low_fq_range, low_fq_width=low_fq_width,
high_fq_range=high_fq_range, high_fq_width=high_fq_width,
method=method, n_surrogates=n_surrogates, vmin=vmin, vmax=vmax,
progress_bar=progress_bar, ax_special=ax_special,
minimum_shift=minimum_shift, random_state=random_state,
coherence_params=coherence_params, low_fq_width_2=low_fq_width_2)
def fast_comod(low_sig=signal, high_sig=None, mask=None, *args, **kwargs):
return ComodTest(*args, **kwargs).fit(low_sig=low_sig, high_sig=high_sig,
mask=mask).comod_
def test_input_checking():
# test that we have a ValueError for bad parameters
func = partial(fast_comod, method='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, fs='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, low_sig='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, high_sig='wrong')
assert_raises(ValueError, func)
def test_different_dimension_in_input():
# Test that 1D or 2D signals are accepted, but not 3D
for dim in [
(4, -1),
(-1, ),
(1, -1),
]:
fast_comod(signal.reshape(*dim))
dim = (2, 2, -1)
assert_raises(ValueError, fast_comod, signal.reshape(*dim))
def test_high_sig_identical():
# Test that we have the same result with high_sig=low_sig and high_sig=None
for method in ALL_PAC_METRICS:
if method in BICOHERENCE_PAC_METRICS:
continue
comod_0 = fast_comod(method=method)
comod_1 = fast_comod(high_sig=signal, method=method)
assert_array_equal(comod_0, comod_1)
def test_comod_correct_maximum():
# Test that the PAC is maximum at the correct location in the comodulogram
for method in ALL_PAC_METRICS:
est = ComodTest(method=method, progress_bar=True).fit(signal)
comod = est.comod_
# test the shape of the comodulogram
assert_array_equal(comod.shape, (n_low, n_high))
# the bicoherence metrics fail this test with current parameters
if method in BICOHERENCE_PAC_METRICS or method == 'jiang':
continue
low_fq_0, high_fq_0, max_pac = est.get_maximum_pac()
assert_equal(low_fq_0, low_fq)
assert_equal(high_fq_0, high_fq)
assert_equal(max_pac, comod.max())
assert_true(np.all(comod > 0))
def test_empty_mask():
# Test that using an empty mask does not change the results
mask = np.zeros(n_points, dtype=bool)
for method in ALL_PAC_METRICS:
comod_0 = fast_comod(mask=mask, method=method)
comod_1 = fast_comod(low_sig=signal[~mask], method=method)
assert_array_almost_equal(comod_0, comod_1, decimal=7)
def test_comodulogram_dar_models():
# Smoke test with DAR models
for klass in (AR, DAR, HAR, StableDAR):
if klass is StableDAR:
model = klass(ordar=10, ordriv=2, iter_newton=10)
else:
model = klass(ordar=10, ordriv=2)
comod = fast_comod(method=model)
assert_true(~np.any(np.isnan(comod)))
def test_plot_comodulogram():
# Smoke test with the standard plotting function
est = ComodTest().fit(signal)
est.plot()
# Smoke test with the special plotting functions
ax = plt.figure().gca()
for method in ALL_PAC_METRICS:
est = ComodTest(low_fq_range=[low_fq], method=method,
ax_special=ax).fit(signal)
# Test that it raises an error if ax_special is not None and low_fq_range
# has more than one element
func = partial(fast_comod, ax_special=ax)
assert_raises(ValueError, func)
plt.close('all')
def test_signal_unchanged():
# Test that signal has not been changed during the test
assert_array_equal(signal_copy, signal)
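# A minimal usage sketch distilled from the tests above (not itself a
# test; _demo_comodulogram is a name used only for illustration).  It
# reuses the simulated `signal` defined at the top of this file and the
# constructor arguments exercised by ComodTest.
def _demo_comodulogram():
    estimator = Comodulogram(fs=fs, low_fq_range=low_fq_range,
                             low_fq_width=1., high_fq_range=high_fq_range,
                             method='tort', progress_bar=False)
    estimator.fit(signal)
    return estimator.comod_.shape  # expected: (n_low, n_high)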
|
bsd-3-clause
|
timahutchinson/elg-templates
|
python/elg_templates/chi2vz.py
|
1
|
2189
|
from os import environ
from os.path import exists, join
import numpy as np
from scipy import linalg
from astropy.io import fits
import matplotlib.pyplot as p
from redmonster.physics.misc import poly_array
plate = 8123
mjd = 56931
npoly = 4
w = 10
rmpath = join( environ['REDMONSTER_SPECTRO_REDUX'], environ['RUN2D'], environ['REDMONSTER_VER'], '%s' % plate, 'redmonster-%s-%s.fits' % (plate,mjd) )
platepath = join( environ['BOSS_SPECTRO_REDUX'], environ['RUN2D'], '%s' % plate, 'spPlate-%s-%s.fits' % (plate,mjd) )
hdu = fits.open(rmpath)
hduplate = fits.open(platepath)
npix = hdu[2].data.shape[-1]
nfibers = hdu[0].header['NFIBERS']
wave = 10**(hdu[0].header['COEFF0'] + np.arange(npix)*hdu[0].header['COEFF1'])
pwave = 10**(3.0 + np.arange(10000)*0.0001)
count = np.zeros(10000)
#chi2 = np.zeros(10000)
chi2 = []
z = []
for i in xrange(1000):
print i
if hdu[1].data.ZWARNING[i] == 0:
if hdu[1].data.CLASS1[i] == 'ssp_galaxy_glob':
z.append(hdu[1].data.Z1[i])
spec = hduplate[0].data[i]
ivar = hduplate[1].data[i]
thiswave = wave / (1+hdu[1].data.Z1[i])
#pT = poly_array(npoly, npix)
#ninv = np.diag(ivar)
#a = linalg.solve( np.dot(np.dot(pT,ninv),np.transpose(pT)), np.dot(np.dot(pT,ninv), spec) )
#pmod = np.dot(np.transpose(pT),a)
#for j in xrange(w/2,npix-w/2):
#pmodchi2 = np.sum(((spec[j-w/2:j+w/2+1]-pmod[j-w/2:j+w/2+1])**2)*ivar[j-w/2:j+w/2+1]) / float(w)
j = np.abs(thiswave-3950).argmin()
w = 500
tmodchi2 = np.sum(((spec[j-w/2:j+w/2+1]-hdu[2].data[i,0][j-w/2:j+w/2+1])**2)*ivar[j-w/2:j+w/2+1]) / float(w)
#ind = np.abs(thiswave[j] - pwave).argmin()
#chi2[ind] += (pmodchi2 - tmodchi2)
#count[ind] += 1
chi2.append(tmodchi2)
#y = chi2/count
#for i,val in enumerate(y):
# if np.isnan(val): y[i] = 0
#p.plot(pwave, y)
#p.axis([pwave[0], pwave[-1], min(y)*2, max(y)*1.2])
p.scatter(z, chi2, marker='.')
p.axis([0,1.4,0,3])
p.xlabel(r'$z$', size=14)
p.ylabel(r'$\chi_\mathrm{Ca H&K}^2$', size=14)
p.savefig('../../plots/chi2vz.pdf')
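# Hedged sketch (not part of the original script): the windowed reduced
# chi-squared computed inline above, wrapped in a helper so the Ca H&K
# comparison is easier to reuse; local_reduced_chi2 is a name introduced
# here for illustration only.
def local_reduced_chi2(spec, model, ivar, j, w):
    """Reduced chi^2 of model vs. spec in a window of width w around pixel j."""
    sl = slice(j - w // 2, j + w // 2 + 1)
    return np.sum(((spec[sl] - model[sl]) ** 2) * ivar[sl]) / float(w)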
|
mit
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/matplotlib/ticker.py
|
4
|
78990
|
"""
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about major
or minor ticks, they are used by the Axis class to support major and
minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones.
Default Formatter
-----------------
The default formatter identifies when the x-data being
plotted is a small range on top of a large offset. To
reduce the chances that the ticklabels overlap, the ticks
are labeled as deltas from a fixed offset. For example::
    ax.plot(np.arange(2000, 2010), range(10))
will have ticks of 0-9 with an offset of +2e3. If this
is not desired turn off the use of the offset on the default
formatter::
ax.get_xaxis().get_major_formatter().set_useOffset(False)
set the rcParam ``axes.formatter.useoffset=False`` to turn it off
globally, or set a different formatter.
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
MultipleLocator. You initialize this with a base, e.g., 10, and it
picks axis limits and ticks that are multiples of your base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (e.g., where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically spaced ticks from min to max
:class:`SymmetricalLogLocator`
locator for use with the symlog norm; works like the
`LogLocator` for the part outside of the threshold and adds 0 if
it is inside the limits
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
:class:`AutoMinorLocator`
locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. It subdivides the major
tick interval into a specified number of minor intervals,
defaulting to 4 or 5 depending on the major interval.
:class:`LogitLocator`
Locator for logit scaling.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, e.g., no minor ticks on by
default.
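A minimal usage sketch (the particular locator choices below are only an
example)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MultipleLocator, AutoMinorLocator
    fig, ax = plt.subplots()
    ax.plot(range(100))
    ax.xaxis.set_major_locator(MultipleLocator(20))  # major tick every 20 units
    ax.xaxis.set_minor_locator(AutoMinorLocator())   # minor ticks between majors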
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
No labels on the ticks
:class:`IndexFormatter`
Set the strings from a list of labels
:class:`FixedFormatter`
Set the strings manually for the labels
:class:`FuncFormatter`
User defined function sets the labels
:class:`StrMethodFormatter`
Use string `format` method
:class:`FormatStrFormatter`
Use an old-style sprintf format string
:class:`ScalarFormatter`
Default formatter for scalars: autopick the format string
:class:`LogFormatter`
Formatter for log axes
:class:`LogFormatterExponent`
Format values for log axis using ``exponent = log_base(value)``.
:class:`LogFormatterMathtext`
Format values for log axis using ``exponent = log_base(value)``
using Math text.
:class:`LogFormatterSciNotation`
Format values for log axis using scientific notation.
:class:`LogitFormatter`
Probability formatter.
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has
access to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
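A user-defined function can also be wrapped in a :class:`FuncFormatter`;
a minimal sketch for a hypothetical fraction-to-percent axis::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter
    def to_percent(x, pos):
        # format the tick value x as a percent string; pos is unused
        return '%.0f%%' % (100 * x)
    fig, ax = plt.subplots()
    ax.plot([0.1, 0.4, 0.9])
    ax.yaxis.set_major_formatter(FuncFormatter(to_percent))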
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import decimal
import itertools
import locale
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
from matplotlib.cbook import mplDeprecation
import warnings
if six.PY3:
long = int
# Work around numpy/numpy#6127.
def _divmod(x, y):
if isinstance(x, np.generic):
x = x.item()
if isinstance(y, np.generic):
y = y.item()
return six.moves.builtins.divmod(x, y)
def _mathdefault(s):
return '\\mathdefault{%s}' % s
class _DummyAxis(object):
def __init__(self, minpos=0):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self._minpos = minpos
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
def get_tick_space(self):
# Just use the long-standing default of nbins==9
return 9
class TickHelper(object):
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Create a string based on a tick value and location.
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
"""
Return the format for tick value `x` at position pos.
``pos=None`` indicates an unspecified location.
"""
raise NotImplementedError('Derived must override')
def format_data(self, value):
"""
Returns the full string representation of the value with the
position unspecified.
"""
return self.__call__(value)
def format_data_short(self, value):
"""
Return a short string version of the tick value.
Defaults to the position-independent long value.
"""
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
Some classes may want to replace a hyphen for minus with the
proper unicode symbol (U+2212) for typographical correctness.
The default is to not replace it.
Note, if you use this method, e.g., in :meth:`format_data` or
:meth:`__call__`, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms to handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method.
"""
return s
class IndexFormatter(Formatter):
"""
Format the position x to the nearest i-th label where i=int(x+0.5)
"""
def __init__(self, labels):
self.labels = labels
self.n = len(labels)
def __call__(self, x, pos=None):
"""
Return the format for tick value `x` at position pos.
The position is ignored and the value is rounded to the nearest
integer, which is used to look up the label.
"""
i = int(x + 0.5)
if i < 0 or i >= self.n:
return ''
else:
return self.labels[i]
class NullFormatter(Formatter):
"""
Always return the empty string.
"""
def __call__(self, x, pos=None):
"""
Returns an empty string for all inputs.
"""
return ''
class FixedFormatter(Formatter):
"""
Return fixed strings for tick labels based only on position, not
value.
"""
def __init__(self, seq):
"""
Set the sequence of strings that will be used for labels.
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
"""
Returns the label that matches the position regardless of the
value.
For positions ``pos < len(seq)``, return ``seq[pos]`` regardless of
`x`. Otherwise return empty string. `seq` is the sequence of
strings that this object was initialized with.
"""
if pos is None or pos >= len(self.seq):
return ''
else:
return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
Use a user-defined function for formatting.
The function should take in two inputs (a tick value ``x`` and a
position ``pos``), and return a string containing the corresponding
tick label.
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
"""
Return the value of the user defined function.
`x` and `pos` are passed through as-is.
"""
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use an old-style ('%' operator) format string to format the tick.
The format string should have a single variable format (%) in it.
It will be applied to the value (not the position) of the tick.
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
"""
Return the formatted label string.
Only the value `x` is formatted. The position is ignored.
"""
return self.fmt % x
class StrMethodFormatter(Formatter):
"""
Use a new-style format string (as used by `str.format()`)
to format the tick.
The field used for the value must be labeled `x` and the field used
for the position must be labeled `pos`.
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
"""
Return the formatted label string.
`x` and `pos` are passed to `str.format` as keyword arguments
with those exact names.
"""
return self.fmt.format(x=x, pos=pos)
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
"""
Return the format for tick val `x` based on the width of the
axis.
The position `pos` is ignored.
"""
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x, d)
def pprint_val(self, x, d):
"""
Formats the value `x` based on the size of the axis range `d`.
"""
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Format tick values as a number.
Tick value is interpreted as a plain old number. If
``useOffset==True`` and the data range is much smaller than the data
average, then an offset will be determined such that the tick labels
are meaningful. Scientific notation is used for ``data < 10^-n`` or
``data >= 10^m``, where ``n`` and ``m`` are the power limits set
using ``set_powerlimits((n,m))``. The defaults for these are
controlled by the ``axes.formatter.limits`` rc parameter.
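A configuration sketch (the limits and data values below are arbitrary)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import ScalarFormatter
    fig, ax = plt.subplots()
    ax.plot([0.0005, 0.0010, 0.0015])
    fmt = ScalarFormatter(useMathText=True)
    fmt.set_powerlimits((-3, 4))  # scientific notation outside 1e-3 .. 1e4
    ax.yaxis.set_major_formatter(fmt)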
"""
def __init__(self, useOffset=None, useMathText=None, useLocale=None):
# useOffset allows plotting small data ranges with large offsets: for
# example: [1+1e-9, 1+2e-9, 1+3e-9].
# useMathText will render the offset and scientific notation in mathtext.
if useOffset is None:
useOffset = rcParams['axes.formatter.useoffset']
self._offset_threshold = rcParams['axes.formatter.offset_threshold']
self.set_useOffset(useOffset)
self._usetex = rcParams['text.usetex']
if useMathText is None:
useMathText = rcParams['axes.formatter.use_mathtext']
self._useMathText = useMathText
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
if useLocale is None:
useLocale = rcParams['axes.formatter.use_locale']
self._useLocale = useLocale
def get_useOffset(self):
return self._useOffset
def set_useOffset(self, val):
if val in [True, False]:
self.offset = 0
self._useOffset = val
else:
self._useOffset = False
self.offset = val
useOffset = property(fget=get_useOffset, fset=set_useOffset)
def get_useLocale(self):
return self._useLocale
def set_useLocale(self, val):
if val is None:
self._useLocale = rcParams['axes.formatter.use_locale']
else:
self._useLocale = val
useLocale = property(fget=get_useLocale, fset=set_useLocale)
def fix_minus(self, s):
"""
Replace hyphens with a unicode minus.
"""
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
return s
else:
return s.replace('-', '\u2212')
def __call__(self, x, pos=None):
"""
Return the format for tick value `x` at position `pos`.
"""
if len(self.locs) == 0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
"""
Turn scientific notation on or off.
.. seealso:: Method :meth:`set_powerlimits`
"""
self._scientific = bool(b)
def set_powerlimits(self, lims):
"""
Sets size thresholds for scientific notation.
``lims`` is a two-element sequence containing the powers of 10
that determine the switchover threshold. Numbers below
``10**lims[0]`` and above ``10**lims[1]`` will be displayed in
scientific notation.
For example, ``formatter.set_powerlimits((-3, 4))`` sets the
pre-2007 default in which scientific notation is used for
numbers less than 1e-3 or greater than 1e4.
.. seealso:: Method :meth:`set_scientific`
"""
if len(lims) != 2:
raise ValueError("'lims' must be a sequence of length 2")
self._powerlimits = lims
def format_data_short(self, value):
"""
Return a short formatted string representation of a number.
"""
if self._useLocale:
return locale.format_string('%-12g', (value,))
else:
return '%-12g' % value
def format_data(self, value):
"""
Return a formatted string representation of a number.
"""
if self._useLocale:
s = locale.format_string('%1.10e', (value,))
else:
s = '%1.10e' % value
s = self._formatSciNotation(s)
return self.fix_minus(s)
def get_offset(self):
"""
Return scientific notation, plus offset.
"""
if len(self.locs) == 0:
return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0:
offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
else:
sciNotStr = '1e%d' % self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times%s' % _mathdefault(sciNotStr)
s = ''.join(('$', sciNotStr, _mathdefault(offsetStr), '$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$', sciNotStr, offsetStr, '$'))
else:
s = ''.join((sciNotStr, offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
"""
Set the locations of the ticks.
"""
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
if self._useOffset:
self._compute_offset()
self._set_orderOfMagnitude(d)
self._set_format(vmin, vmax)
def _compute_offset(self):
locs = self.locs
if locs is None or not len(locs):
self.offset = 0
return
# Restrict to visible ticks.
vmin, vmax = sorted(self.axis.get_view_interval())
locs = np.asarray(locs)
locs = locs[(vmin <= locs) & (locs <= vmax)]
if not len(locs):
self.offset = 0
return
lmin, lmax = locs.min(), locs.max()
# Only use offset if there are at least two ticks and every tick has
# the same sign.
if lmin == lmax or lmin <= 0 <= lmax:
self.offset = 0
return
# min, max comparing absolute values (we want division to round towards
# zero so we work on absolute values).
abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
sign = math.copysign(1, lmin)
# What is the smallest power of ten such that abs_min and abs_max are
# equal up to that precision?
# Note: Internally using oom instead of 10 ** oom avoids some numerical
# accuracy issues.
oom_max = np.ceil(math.log10(abs_max))
oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
if abs_min // 10 ** oom != abs_max // 10 ** oom)
if (abs_max - abs_min) / 10 ** oom <= 1e-2:
# Handle the case of straddling a multiple of a large power of ten
# (relative to the span).
# What is the smallest power of ten such that abs_min and abs_max
# are no more than 1 apart at that precision?
oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
# Only use offset if it saves at least _offset_threshold digits.
n = self._offset_threshold - 1
self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
if abs_max // 10 ** oom >= 10**n
else 0)
def _set_orderOfMagnitude(self, range):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the
# offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset:
oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self, vmin, vmax):
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = list(self.locs) + [vmin, vmax]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
# Curvilinear coordinates can yield two identical points.
if loc_range == 0:
loc_range = np.max(np.abs(locs))
# Both points might be zero.
if loc_range == 0:
loc_range = 1
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, 3 - loc_range_oom)
# refined estimate:
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$%s$' % _mathdefault(self.format)
def pprint_val(self, x):
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if np.absolute(xp) < 1e-8:
xp = 0
if self._useLocale:
return locale.format_string(self.format, (xp,))
else:
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
if self._useLocale:
decimal_point = locale.localeconv()['decimal_point']
positive_sign = locale.localeconv()['positive_sign']
else:
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1' and exponent != '':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
return r'%s{\times}%s' % (significand, exponent)
else:
return r'%s%s' % (significand, exponent)
else:
s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
return s
except IndexError:
return s
class LogFormatter(Formatter):
"""
Base class for formatting ticks on a log or symlog scale.
It may be instantiated directly, or subclassed.
Parameters
----------
base : float, optional, default: 10.
Base of the logarithm used in all calculations.
labelOnlyBase : bool, optional, default: False
If True, label ticks only at integer powers of base.
This is normally True for major ticks and False for
minor ticks.
minor_thresholds : (subset, all), optional, default: (1, 0.4)
If labelOnlyBase is False, these two numbers control
the labeling of ticks that are not at integer powers of
base; normally these are the minor ticks. The controlling
parameter is the log of the axis data range. In the typical
case where base is 10 it is the number of decades spanned
by the axis, so we can call it 'numdec'. If ``numdec <= all``,
all minor ticks will be labeled. If ``all < numdec <= subset``,
then only a subset of minor ticks will be labeled, so as to
avoid crowding. If ``numdec > subset`` then no minor ticks will
be labeled.
linthresh : None or float, optional, default: None
If a symmetric log scale is in use, its ``linthresh``
parameter must be supplied here.
Notes
-----
The `set_locs` method must be called to enable the subsetting
logic controlled by the ``minor_thresholds`` parameter.
In some cases such as the colorbar, there is no distinction between
major and minor ticks; the tick locations might be set manually,
or by a locator that puts ticks at integer powers of base and
at intermediate locations. For this situation, disable the
minor_thresholds logic by using ``minor_thresholds=(np.inf, np.inf)``,
so that all ticks will be labeled.
To disable labeling of minor ticks when 'labelOnlyBase' is False,
use ``minor_thresholds=(0, 0)``. This is the default for the
"classic" style.
Examples
--------
To label a subset of minor ticks when the view limits span up
to 2 decades, and all of the ticks when zoomed in to 0.5 decades
or less, use ``minor_thresholds=(2, 0.5)``.
To label all minor ticks when the view limits span up to 1.5
decades, use ``minor_thresholds=(1.5, 1.5)``.
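A construction sketch (the threshold values below are only illustrative)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import LogFormatter
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    fmt = LogFormatter(base=10.0, labelOnlyBase=False, minor_thresholds=(2, 0.5))
    ax.yaxis.set_minor_formatter(fmt)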
"""
def __init__(self, base=10.0, labelOnlyBase=False,
minor_thresholds=None,
linthresh=None):
self._base = float(base)
self.labelOnlyBase = labelOnlyBase
if minor_thresholds is None:
if rcParams['_internal.classic_mode']:
minor_thresholds = (0, 0)
else:
minor_thresholds = (1, 0.4)
self.minor_thresholds = minor_thresholds
self._sublabels = None
self._linthresh = linthresh
def base(self, base):
"""
change the `base` for labeling.
.. warning::
Should always match the base used for :class:`LogLocator`
"""
self._base = base
def label_minor(self, labelOnlyBase):
"""
Switch minor tick labeling on or off.
Parameters
----------
labelOnlyBase : bool
If True, label ticks only at integer powers of base.
"""
self.labelOnlyBase = labelOnlyBase
def set_locs(self, locs=None):
"""
Use axis view limits to control which ticks are labeled.
The ``locs`` parameter is ignored in the present algorithm.
"""
if np.isinf(self.minor_thresholds[0]):
self._sublabels = None
return
# Handle symlog case:
linthresh = self._linthresh
if linthresh is None:
try:
linthresh = self.axis.get_transform().linthresh
except AttributeError:
pass
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if linthresh is None and vmin <= 0:
# It's probably a colorbar with
# a format kwarg setting a LogFormatter in the manner
# that worked with 1.5.x, but that doesn't work now.
self._sublabels = set((1,)) # label powers of base
return
b = self._base
if linthresh is not None: # symlog
# Only compute the number of decades in the logarithmic part of the
# axis
numdec = 0
if vmin < -linthresh:
rhs = min(vmax, -linthresh)
numdec += math.log(vmin / rhs) / math.log(b)
if vmax > linthresh:
lhs = max(vmin, linthresh)
numdec += math.log(vmax / lhs) / math.log(b)
else:
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
numdec = abs(vmax - vmin)
if numdec > self.minor_thresholds[0]:
# Label only bases
self._sublabels = set((1,))
elif numdec > self.minor_thresholds[1]:
# Add labels between bases at log-spaced coefficients;
# include base powers in case the locations include
# "major" and "minor" points, as in colorbar.
c = np.logspace(0, 1, int(b)//2 + 1, base=b)
self._sublabels = set(np.round(c))
# For base 10, this yields (1, 2, 3, 4, 6, 10).
else:
# Label all integer multiples of base**n.
self._sublabels = set(np.arange(1, b + 1))
def _num_to_string(self, x, vmin, vmax):
if x > 10000:
s = '%1.0e' % x
elif x < 1:
s = '%1.0e' % x
else:
s = self.pprint_val(x, vmax - vmin)
return s
def __call__(self, x, pos=None):
"""
Return the format for tick val `x`.
"""
if x == 0.0: # Symlog
return '0'
sign = np.sign(x)
x = abs(x)
b = self._base
# only label the decades
fx = math.log(x) / math.log(b)
is_x_decade = is_close_to_int(fx)
exponent = np.round(fx) if is_x_decade else np.floor(fx)
coeff = np.round(x / b ** exponent)
if self.labelOnlyBase and not is_x_decade:
return ''
if self._sublabels is not None and coeff not in self._sublabels:
return ''
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
s = self._num_to_string(x, vmin, vmax)
return self.fix_minus(s)
def format_data(self, value):
b = self.labelOnlyBase
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = b
return value
def format_data_short(self, value):
"""
Return a short formatted string representation of a number.
"""
return '%-12g' % value
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
exponent = int(tup[1])
if exponent:
s = '%se%d' % (mantissa, exponent)
else:
s = mantissa
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis using ``exponent = log_base(value)``.
"""
def _num_to_string(self, x, vmin, vmax):
fx = math.log(x) / math.log(self._base)
if abs(fx) > 10000:
s = '%1.0g' % fx
elif abs(fx) < 1:
s = '%1.0g' % fx
else:
fd = math.log(vmax - vmin) / math.log(self._base)
s = self.pprint_val(fx, fd)
return s
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis using ``exponent = log_base(value)``.
"""
def _non_decade_format(self, sign_string, base, fx, usetex):
'Return string for non-decade locations'
if usetex:
return (r'$%s%s^{%.2f}$') % (sign_string, base, fx)
else:
return ('$%s$' % _mathdefault('%s%s^{%.2f}' %
(sign_string, base, fx)))
def __call__(self, x, pos=None):
"""
Return the format for tick value `x`.
The position `pos` is ignored.
"""
usetex = rcParams['text.usetex']
if x == 0: # Symlog
if usetex:
return '$0$'
else:
return '$%s$' % _mathdefault('0')
sign_string = '-' if x < 0 else ''
x = abs(x)
b = self._base
# only label the decades
fx = math.log(x) / math.log(b)
is_x_decade = is_close_to_int(fx)
exponent = np.round(fx) if is_x_decade else np.floor(fx)
coeff = np.round(x / b ** exponent)
if self.labelOnlyBase and not is_x_decade:
return ''
if self._sublabels is not None and coeff not in self._sublabels:
return ''
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if not is_x_decade:
return self._non_decade_format(sign_string, base, fx, usetex)
else:
if usetex:
return (r'$%s%s^{%d}$') % (sign_string,
base,
nearest_long(fx))
else:
return ('$%s$' % _mathdefault(
'%s%s^{%d}' %
(sign_string, base, nearest_long(fx))))
class LogFormatterSciNotation(LogFormatterMathtext):
"""
Format values following scientific notation in a logarithmic axis
"""
def _non_decade_format(self, sign_string, base, fx, usetex):
'Return string for non-decade locations'
b = float(base)
exponent = math.floor(fx)
coeff = b ** fx / b ** exponent
if is_close_to_int(coeff):
coeff = nearest_long(coeff)
if usetex:
return (r'$%s%g\times%s^{%d}$') % \
(sign_string, coeff, base, exponent)
else:
return ('$%s$' % _mathdefault(r'%s%g\times%s^{%d}' %
(sign_string, coeff, base, exponent)))
class LogitFormatter(Formatter):
"""
Probability formatter (using Math text).
"""
def __call__(self, x, pos=None):
s = ''
if 0.01 <= x <= 0.99:
s = '{:.2f}'.format(x)
elif x < 0.01:
if is_decade(x):
s = '$10^{{{:.0f}}}$'.format(np.log10(x))
else:
s = '${:.5f}$'.format(x)
else: # x > 0.99
if is_decade(1-x):
s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
else:
s = '$1-{:.5f}$'.format(1-x)
return s
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
class EngFormatter(Formatter):
"""
Formats axis values using engineering prefixes to represent powers
of 1000, plus a specified unit, e.g., 10 MHz instead of 1e7.
`unit` is a string containing the abbreviated name of the unit,
suitable for use with single-letter representations of powers of
1000. For example, 'Hz' or 'm'.
`places` is the precision with which to display the number,
specified in digits after the decimal point (there will be between
one and three digits before the decimal point).
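For instance (a minimal sketch; the data values are arbitrary)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import EngFormatter
    fig, ax = plt.subplots()
    ax.plot([1e6, 1e7, 1e8], [1, 2, 3])
    ax.xaxis.set_major_formatter(EngFormatter(unit='Hz', places=1))  # 1e7 -> '10.0 MHz'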
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "\u03bc",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, unit="", places=None):
self.unit = unit
self.places = places
def __call__(self, x, pos=None):
s = "%s%s" % (self.format_eng(x), self.unit)
return self.fix_minus(s)
def format_eng(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.places = 0
'0'
>>> format_eng(1000000) # for self.places = 1
'1.0 M'
>>> format_eng("-1e-6") # for self.places = 2
u'-1.00 \u03bc'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0:
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
prefix = self.ENG_PREFIXES[int(pow10)]
mant = sign * dnum / (10 ** pow10)
if self.places is None:
format_str = "%g %s"
elif self.places == 0:
format_str = "%i %s"
elif self.places > 0:
format_str = ("%%.%if %%s" % self.places)
formatted = format_str % (mant, prefix)
formatted = formatted.strip()
if (self.unit != "") and (prefix == self.ENG_PREFIXES[0]):
formatted = formatted + " "
return formatted
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different
:class:`~matplotlib.axis.Axis` because the locator stores references to
the Axis data and view limits
"""
# Some automatic tick locators can generate so many ticks they
# kill the machine when you try and render them.
# This parameter is set to cause locators to raise an error if too
# many ticks are generated.
MAXTICKS = 1000
def tick_values(self, vmin, vmax):
"""
Return the values of the located ticks given **vmin** and **vmax**.
.. note::
To get tick locations with the vmin and vmax values defined
automatically for the associated :attr:`axis` simply call
the Locator instance::
>>> print((type(loc)))
<type 'Locator'>
>>> print((loc()))
[1, 2, 3, 4]
"""
raise NotImplementedError('Derived must override')
def set_params(self, **kwargs):
"""
Do nothing, and raise a warning. Any locator class not supporting the
set_params() function will call this.
"""
warnings.warn("'set_params()' not defined for locator of type " +
str(type(self)))
def __call__(self):
"""Return the locations of the ticks"""
# note: some locators return data limits, other return view limits,
# hence there is no *one* interface to call self.tick_values.
raise NotImplementedError('Derived must override')
def raise_if_exceeds(self, locs):
"""raise a RuntimeError if Locator attempts to create more than
MAXTICKS locs"""
if len(locs) >= self.MAXTICKS:
msg = ('Locator attempting to generate %d ticks from %s to %s: ' +
'exceeds Locator.MAXTICKS') % (len(locs), locs[0], locs[-1])
raise RuntimeError(msg)
return locs
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this method is overridden by subclasses to
change locator behaviour.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
"""autoscale the view limits"""
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
"""Pan numticks (can be positive or negative)"""
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if numticks > 2:
step = numsteps * abs(ticks[0] - ticks[1])
else:
d = abs(vmax - vmin)
step = numsteps * d / 6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
interval = abs(vmax - vmin)
step = 0.1 * interval * direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
"""refresh internal information based on current lim"""
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, e.g., on every 5th point. It is assumed that you are doing
index plotting; i.e., the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def set_params(self, base=None, offset=None):
"""Set parameters within this locator"""
if base is not None:
self._base = base
if offset is not None:
self.offset = offset
def __call__(self):
"""Return the locations of the ticks"""
dmin, dmax = self.axis.get_data_interval()
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
return self.raise_if_exceeds(
np.arange(vmin + self.offset, vmax + 1, self._base))
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
The subsampling will be done so as to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def set_params(self, nbins=None):
"""Set parameters within this locator."""
if nbins is not None:
self.nbins = nbins
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.absolute(ticks1).min() < np.absolute(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
""""
Return the locations of the ticks.
.. note::
Because the values are Null, vmin and vmax are not used in this
method.
"""
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
Use presets to set locs based on the view limits: a dict mapping (vmin, vmax) -> locs.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def set_params(self, numticks=None, presets=None):
"""Set parameters within this locator."""
if presets is not None:
self.presets = presets
if numticks is not None:
self.numticks = numticks
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if vmax < vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
if rcParams['axes.autolimit_mode'] == 'round_numbers':
exponent, remainder = _divmod(math.log10(vmax - vmin),
math.log10(max([self.numticks-1, 1])))
if remainder < 0.5:
exponent -= 1
scale = max([self.numticks-1, 1]) ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
if abs(x - y) < 1e-10:
return True
else:
return False
class Base(object):
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
if base <= 0:
raise ValueError("'base' must be positive")
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d, m = _divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return (d - 1) * self._base
return d * self._base
def le(self, x):
'return the largest multiple of base <= x'
d, m = _divmod(x, self._base)
if closeto(m / self._base, 1): # was closeto(m, self._base)
#looks like floating point error
return (d + 1) * self._base
return d * self._base
def gt(self, x):
'return the smallest multiple of base > x'
d, m = _divmod(x, self._base)
if closeto(m / self._base, 1):
#looks like floating point error
return (d + 2) * self._base
return (d + 1) * self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d, m = _divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return d * self._base
return (d + 1) * self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
Set a tick on every integer that is a multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def set_params(self, base):
"""Set parameters within this locator."""
if base is not None:
self._base = base
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001 * base) // base
locs = vmin - base + np.arange(n + 3) * base
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
if rcParams['axes.autolimit_mode'] == 'round_numbers':
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin == vmax:
vmin -= 1
vmax += 1
else:
vmin = dmin
vmax = dmax
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin) # > 0 as nonsingular is called before.
meanv = (vmax + vmin) / 2
if abs(meanv) / dv < threshold:
offset = 0
else:
offset = math.copysign(10 ** (math.log10(abs(meanv)) // 1), meanv)
scale = 10 ** (math.log10(dv / n) // 1)
return scale, offset
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
default_params = dict(nbins=10,
steps=None,
integer=False,
symmetric=False,
prune=None,
min_n_ticks=2)
def __init__(self, *args, **kwargs):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of
ticks. If the string `'auto'`, the number of bins will be
automatically determined based on the length of the axis.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values, provided
at least `min_n_ticks` integers are found within the
view limits.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it, primarily when
`rcParams['axes.autolimit_mode']` is `'round_numbers'`.
If `prune=='lower'`, the smallest tick will
be removed. If `prune=='upper'`, the largest tick will be
removed. If `prune=='both'`, the largest and smallest ticks
will be removed. If `prune==None`, no ticks will be removed.
*min_n_ticks*
Relax `nbins` and `integer` constraints if necessary to
obtain this minimum number of ticks.
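A short usage sketch (the parameter values below are arbitrary)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    fig, ax = plt.subplots()
    ax.plot(range(37))
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5, integer=True))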
"""
if args:
kwargs['nbins'] = args[0]
if len(args) > 1:
raise ValueError(
"Keywords are required for all arguments except 'nbins'")
self.set_params(**self.default_params)
self.set_params(**kwargs)
@staticmethod
def _validate_steps(steps):
if not np.iterable(steps):
raise ValueError('steps argument must be a sequence of numbers '
'from 1 to 10')
steps = np.asarray(steps)
if np.any(np.diff(steps) <= 0):
raise ValueError('steps argument must be uniformly increasing')
if steps[-1] > 10 or steps[0] < 1:
warnings.warn('Steps argument should be a sequence of numbers\n'
'increasing from 1 to 10, inclusive. Behavior with\n'
'values outside this range is undefined, and will\n'
'raise a ValueError in future versions of mpl.')
if steps[0] != 1:
steps = np.hstack((1, steps))
if steps[-1] != 10:
steps = np.hstack((steps, 10))
return steps
@staticmethod
def _staircase(steps):
# Make an extended staircase within which the needed
# step will be found. This is probably much larger
# than necessary.
flights = (0.1 * steps[:-1], steps, 10 * steps[1])
return np.hstack(flights)
def set_params(self, **kwargs):
"""Set parameters within this locator."""
if 'nbins' in kwargs:
self._nbins = kwargs['nbins']
if self._nbins != 'auto':
self._nbins = int(self._nbins)
if 'trim' in kwargs:
warnings.warn(
"The 'trim' keyword has no effect since version 2.0.",
mplDeprecation)
if 'symmetric' in kwargs:
self._symmetric = kwargs['symmetric']
if 'prune' in kwargs:
prune = kwargs['prune']
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if 'min_n_ticks' in kwargs:
self._min_n_ticks = max(1, kwargs['min_n_ticks'])
if 'steps' in kwargs:
steps = kwargs['steps']
if steps is None:
self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10])
else:
self._steps = self._validate_steps(steps)
self._extended_steps = self._staircase(self._steps)
if 'integer' in kwargs:
self._integer = kwargs['integer']
def _raw_ticks(self, vmin, vmax):
if self._nbins == 'auto':
if self.axis is not None:
nbins = max(min(self.axis.get_tick_space(), 9),
max(1, self._min_n_ticks - 1))
else:
nbins = 9
else:
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
_vmin = vmin - offset
_vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
steps = self._extended_steps * scale
if self._integer:
# For steps > 1, keep only integer values.
igood = (steps < 1) | (np.abs(steps - np.round(steps)) < 0.001)
steps = steps[igood]
istep = np.nonzero(steps >= raw_step)[0][0]
# Classic round_numbers mode may require a larger step.
if rcParams['axes.autolimit_mode'] == 'round_numbers':
for istep in range(istep, len(steps)):
step = steps[istep]
best_vmin = (_vmin // step) * step
best_vmax = best_vmin + step * nbins
if (best_vmax >= _vmax):
break
# This is an upper limit; move to smaller steps if necessary.
for i in range(istep):
step = steps[istep - i]
if (self._integer and
np.floor(_vmax) - np.ceil(_vmin) >= self._min_n_ticks - 1):
step = max(1, step)
best_vmin = (_vmin // step) * step
low = np.round(Base(step).le(_vmin - best_vmin) / step)
high = np.round(Base(step).ge(_vmax - best_vmin) / step)
ticks = np.arange(low, high + 1) * step + best_vmin + offset
nticks = ((ticks <= vmax) & (ticks >= vmin)).sum()
if nticks >= self._min_n_ticks:
break
return ticks
@cbook.deprecated("2.0")
def bin_boundaries(self, vmin, vmax):
return self._raw_ticks(vmin, vmax)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if self._symmetric:
vmax = max(abs(vmin), abs(vmax))
vmin = -vmax
vmin, vmax = mtransforms.nonsingular(
vmin, vmax, expander=1e-13, tiny=1e-14)
locs = self._raw_ticks(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
if self._symmetric:
dmax = max(abs(dmin), abs(dmax))
dmin = -dmax
dmin, dmax = mtransforms.nonsingular(
dmin, dmax, expander=1e-12, tiny=1e-13)
if rcParams['axes.autolimit_mode'] == 'round_numbers':
return self._raw_ticks(dmin, dmax)[[0, -1]]
else:
return dmin, dmax
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
def nearest_long(x):
if x == 0:
return long(0)
elif x > 0:
return long(x + 0.5)
else:
return long(x - 0.5)
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_long(x)) < 1e-10
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=(1.0,), numdecs=4, numticks=None):
"""
Place ticks on the locations : subs[j] * base**i
Parameters
----------
subs : None, string, or sequence of float, optional, default (1.0,)
Gives the multiples of integer powers of the base at which
to place ticks. The default places ticks only at
integer powers of the base.
The permitted string values are ``'auto'`` and ``'all'``,
both of which use an algorithm based on the axis view
limits to determine whether and how to put ticks between
integer powers of the base. With ``'auto'``, ticks are
placed only between integer powers; with ``'all'``, the
integer powers are included. A value of None is
equivalent to ``'auto'``.
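A usage sketch (the base and subs values below are only an example)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import LogLocator
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.yaxis.set_major_locator(LogLocator(base=10.0))
    ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=(2.0, 5.0)))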
"""
if numticks is None:
if rcParams['_internal.classic_mode']:
numticks = 15
else:
numticks = 'auto'
self.base(base)
self.subs(subs)
self.numdecs = numdecs
self.numticks = numticks
def set_params(self, base=None, subs=None, numdecs=None, numticks=None):
"""Set parameters within this locator."""
if base is not None:
self.base(base)
if subs is not None:
self.subs(subs)
if numdecs is not None:
self.numdecs = numdecs
if numticks is not None:
self.numticks = numticks
# FIXME: these base and subs functions are contrary to our
# usual and desired API.
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = float(base)
def subs(self, subs):
"""
set the minor ticks for the log scaling every base**i*subs[j]
"""
if subs is None: # consistency with previous bad API
self._subs = 'auto'
elif cbook.is_string_like(subs):
if subs not in ('all', 'auto'):
raise ValueError("A subs string must be 'all' or 'auto'; "
"found '%s'." % subs)
self._subs = subs
else:
self._subs = np.asarray(subs, dtype=float)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if self.numticks == 'auto':
if self.axis is not None:
numticks = max(min(self.axis.get_tick_space(), 9), 2)
else:
numticks = 9
else:
numticks = self.numticks
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if cbook.is_string_like(self._subs):
_first = 2.0 if self._subs == 'auto' else 1.0
if numdec > 10 or b < 3:
if self._subs == 'auto':
return np.array([]) # no minor or major ticks
else:
subs = np.array([1.0]) # major ticks
else:
subs = np.arange(_first, b)
else:
subs = self._subs
stride = 1
if rcParams['_internal.classic_mode']:
# Leave the bug left over from the PY2-PY3 transition.
while numdec / stride + 1 > numticks:
stride += 1
else:
while numdec // stride + 1 > numticks:
stride += 1
# Does subs include anything other than 1?
have_subs = len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0)
decades = np.arange(math.floor(vmin) - stride,
math.ceil(vmax) + 2 * stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
if have_subs:
if stride == 1:
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
ticklocs = []
else:
if have_subs:
ticklocs = []
if stride == 1:
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
vmin, vmax = self.nonsingular(vmin, vmax)
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
if rcParams['axes.autolimit_mode'] == 'round_numbers':
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
return vmin, vmax
def nonsingular(self, vmin, vmax):
if not np.isfinite(vmin) or not np.isfinite(vmax):
return 1, 10 # initial range, no data plotted yet
if vmin > vmax:
vmin, vmax = vmax, vmin
if vmax <= 0:
warnings.warn(
"Data has no positive values, and therefore cannot be "
"log-scaled.")
return 1, 10
minpos = self.axis.get_minpos()
if not np.isfinite(minpos):
minpos = 1e-300 # This should never take effect.
if vmin <= 0:
vmin = minpos
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
return vmin, vmax
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for symmetric log axes
"""
def __init__(self, transform=None, subs=None, linthresh=None, base=None):
"""
Place ticks on the locations ``base**i * subs[j]``.
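A construction sketch (the base, linthresh and subs values below are only
illustrative and should match the symlog scale in use)::
    import matplotlib.pyplot as plt
    from matplotlib.ticker import SymmetricalLogLocator
    fig, ax = plt.subplots()
    ax.set_yscale('symlog')
    ax.yaxis.set_major_locator(
        SymmetricalLogLocator(base=10.0, linthresh=1.0, subs=[1.0, 2.0, 5.0]))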
"""
if transform is not None:
self._base = transform.base
self._linthresh = transform.linthresh
elif linthresh is not None and base is not None:
self._base = base
self._linthresh = linthresh
else:
raise ValueError("Either transform, or both linthresh "
"and base, must be provided.")
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def set_params(self, subs=None, numticks=None):
"""Set parameters within this locator."""
if numticks is not None:
self.numticks = numticks
if subs is not None:
self._subs = subs
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
t = self._linthresh
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(np.floor(float(total_ticks) / (self.numticks - 1)), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
if self._subs is None:
subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * decade)
else:
ticklocs = decades
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if rcParams['axes.autolimit_mode'] == 'round_numbers':
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class LogitLocator(Locator):
"""
Determine the tick locations for logit axes
"""
def __init__(self, minor=False):
"""
place ticks on the logit locations
"""
self.minor = minor
def set_params(self, minor=None):
"""Set parameters within this locator."""
if minor is not None:
self.minor = minor
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
raise NotImplementedError('Polar axis cannot be logit scaled yet')
vmin, vmax = self.nonsingular(vmin, vmax)
vmin = np.log10(vmin / (1 - vmin))
vmax = np.log10(vmax / (1 - vmax))
decade_min = np.floor(vmin)
decade_max = np.ceil(vmax)
# major ticks
if not self.minor:
ticklocs = []
if (decade_min <= -1):
expo = np.arange(decade_min, min(0, decade_max + 1))
ticklocs.extend(list(10**expo))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.append(0.5)
if (decade_max >= 1):
expo = -np.arange(max(1, decade_min), decade_max + 1)
ticklocs.extend(list(1 - 10**expo))
# minor ticks
else:
ticklocs = []
if (decade_min <= -2):
expo = np.arange(decade_min, min(-1, decade_max))
newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
if (decade_min <= 0) and (decade_max >= 0):
ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
if (decade_max >= 2):
expo = -np.arange(max(2, decade_min), decade_max + 1)
newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
ticklocs.extend(list(newticks))
return self.raise_if_exceeds(np.array(ticklocs))
def nonsingular(self, vmin, vmax):
initial_range = (1e-7, 1 - 1e-7)
if not np.isfinite(vmin) or not np.isfinite(vmax):
return initial_range # no data plotted yet
if vmin > vmax:
vmin, vmax = vmax, vmin
# what to do if a window beyond ]0, 1[ is chosen
if self.axis is not None:
minpos = self.axis.get_minpos()
if not np.isfinite(minpos):
return initial_range # again, no data plotted
else:
minpos = 1e-7 # should not occur in normal use
# NOTE: for vmax, we should query a property similar to get_minpos, but
# related to the maximal, less-than-one data point. Unfortunately,
# Bbox._minpos is defined very deep in the BBox and updated with data,
# so for now we use 1 - minpos as a substitute.
if vmin <= 0:
vmin = minpos
if vmax >= 1:
vmax = 1 - minpos
if vmin == vmax:
return 0.1 * vmin, 1 - 0.1 * vmin
return vmin, vmax
class AutoLocator(MaxNLocator):
def __init__(self):
if rcParams['_internal.classic_mode']:
nbins = 9
steps = [1, 2, 5, 10]
else:
nbins = 'auto'
steps = [1, 2, 2.5, 5, 10]
MaxNLocator.__init__(self, nbins=nbins, steps=steps)
class AutoMinorLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. Assumes the scale is linear and major ticks are
evenly spaced.
"""
def __init__(self, n=None):
"""
*n* is the number of subdivisions of the interval between
major ticks; e.g., n=2 will place a single minor tick midway
between major ticks.
If *n* is omitted or None, it will be set to 5 or 4.
"""
self.ndivs = n
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
majorstep = 0
if self.ndivs is None:
if majorstep == 0:
# TODO: Need a better way to figure out ndivs
ndivs = 1
else:
x = int(np.round(10 ** (np.log10(majorstep) % 1)))
if x in [1, 5, 10]:
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if len(majorlocs) > 0:
t0 = majorlocs[0]
tmin = ((vmin - t0) // minorstep + 1) * minorstep
tmax = ((vmax - t0) // minorstep + 1) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
locs = locs.compress(cond)
else:
locs = []
return self.raise_if_exceeds(np.array(locs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
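# A small sketch of the default subdivision rule used above (illustration only,
# kept outside the class). The mantissa of the major step decides the count:
# steps whose leading digit rounds to 1, 5 or 10 (e.g. 0.5, 1, 5, 10) get 5
# minor intervals, others (e.g. 0.2, 2, 2.5) get 4.
def _demo_autominor_ndivs(majorstep):
    x = int(np.round(10 ** (np.log10(majorstep) % 1)))
    return 5 if x in (1, 5, 10) else 4
# e.g. _demo_autominor_ndivs(0.5) -> 5, _demo_autominor_ndivs(2.0) -> 4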
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self.raise_if_exceeds(self._locator())
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d <= 0:
locator = MultipleLocator(0.2)
else:
try:
ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10 ** fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5 * base:
ticksize = base
elif d >= 2 * base:
ticksize = base / 2.0
else:
ticksize = base / 5.0
locator = MultipleLocator(ticksize)
return locator
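# A worked example of the decade logic in OldAutoLocator.get_locator
# (illustration only). For a span of d = 7.3: base = 10**floor(log10(7.3)) = 1,
# and since d >= 5 * base the tick size is base itself, i.e. MultipleLocator(1).
def _demo_old_auto_locator():
    return OldAutoLocator().get_locator(7.3)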
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'StrMethodFormatter', 'ScalarFormatter', 'LogFormatter',
'LogFormatterExponent', 'LogFormatterMathtext', 'Locator',
'IndexLocator', 'FixedLocator', 'NullLocator',
'LinearLocator', 'LogLocator', 'AutoLocator',
'MultipleLocator', 'MaxNLocator', 'AutoMinorLocator',
'SymmetricalLogLocator')
|
mit
|
jayhetee/mpld3
|
doc/sphinxext/plot_generator.py
|
19
|
10614
|
import sys
import os
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg') # don't display plots
import mpld3
from matplotlib import image
from matplotlib.figure import Figure
class disable_mpld3(object):
"""Context manager to temporarily disable mpld3.show() command"""
def __enter__(self):
self.show = mpld3.show
mpld3.show = lambda *args, **kwargs: None
return self
def __exit__(self, type, value, traceback):
mpld3.show = self.show
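# A minimal usage sketch (not part of the generator itself): inside the context
# manager, mpld3.show() is a no-op, so example scripts that end with a call to
# mpld3.show() can be exec'd during the doc build without blocking.
def _demo_disable_mpld3():
    with disable_mpld3():
        mpld3.show()  # silently ignored while the context is active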
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. raw:: html
{img_html}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
display: inline;
width: 170px;
height: 170px;
opacity:0.4;
filter:alpha(opacity=40); /* For IE8 and earlier */
}}
.figure img:hover
{{
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure .caption {{
width: 180px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example Gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
BANNER_JS_TEMPLATE = """
var banner_data = {banner_data};
banner_data.forEach(function(d, i) {{
d.i = i;
}});
var height = 150,
width = 900,
imageHeight = 150,
imageWidth = 150,
zoomfactor = 0.1;
var banner = d3.select(".example-banner");
banner.style("height", height + "px")
.style("width", width + "px")
.style("margin-left", "auto")
.style("margin-right", "auto");
var svg = banner.append("svg")
.attr("width", width + "px")
.attr("height", height + "px");
var anchor = svg.append("g")
.attr("class", "example-anchor")
.selectAll("a")
.data(banner_data.slice(0, 7));
anchor.exit().remove();
var anchor_elements = anchor.enter().append("a")
.attr("xlink:href", function(d) {{ return d.url; }})
.attr("xlink:title", function(d) {{ return d.title; }});
anchor_elements.append("svg:image")
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * imageHeight)
.attr("xlink:href", function(d){{ return d.thumb; }})
.attr("xroot", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("x", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("y", d3.round(0.5 * zoomfactor * imageHeight))
.attr("i", function(d){{return d.i;}})
.on("mouseover", function() {{
var img = d3.select(this);
img.transition()
.attr("width", imageWidth)
.attr("height", height)
.attr("x", img.attr("xroot")
- d3.round(0.5 * zoomfactor * imageWidth))
.attr("y", 0);
}})
.on("mouseout", function() {{
var img = d3.select(this);
img.transition()
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * height)
.attr("x", img.attr("xroot"))
.attr("y", d3.round(0.5 * zoomfactor * imageHeight));
}});
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.6, border=4):
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
baseout, extout = os.path.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - 0.5 * width)
y0 = int(cy * rows - 0.5 * height)
thumb = im[y0: y0 + height,
x0: x0 + width]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
extension = extout.lower()
if extension == '.png':
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
elif extension == '.pdf':
from matplotlib.backends.backend_pdf \
import FigureCanvasPDF as FigureCanvas
elif extension == '.svg':
from matplotlib.backends.backend_svg \
import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
fig = Figure(figsize=(float(width) / dpi, float(height) / dpi),
dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
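# A hedged usage sketch for create_thumbnail; the file names below are
# hypothetical. The function crops a window of the requested pixel size centred
# at (cx, cy) in relative coordinates, paints a black border, and re-saves it.
def _demo_create_thumbnail():
    # assumes 'example_full.png' exists and is at least 300x300 pixels
    return create_thumbnail('example_full.png', 'example_thumb.png',
                            width=300, height=300)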
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.extract_docstring()
self.exec_file()
@property
def dirname(self):
return os.path.split(self.filename)[0]
@property
def fname(self):
return os.path.split(self.filename)[1]
@property
def modulename(self):
return os.path.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
return self.modulename + '.png'
@property
def thumbfilename(self):
# TODO: don't hard-code image path
return "_images/" + self.pngfilename
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
with disable_mpld3():
import matplotlib.pyplot as plt
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
self.html = mpld3.fig_to_html(fig)
thumbfile = os.path.join(self.target_dir,
self.pngfilename)
fig.savefig(thumbfile)
create_thumbnail(thumbfile, thumbfile)
def toctree_entry(self):
return " ./%s\n\n" % os.path.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. figure:: ./{0}\n"
" :target: ./{1}\n"
" :align: center\n\n"
" :ref:`{2}`\n\n".format(self.pngfilename,
self.htmlfilename,
self.sphinxtag))
def main(app):
static_dir = os.path.join(app.builder.srcdir, '_static')
target_dir = os.path.join(app.builder.srcdir, 'examples')
source_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'..', 'examples'))
if not os.path.exists(static_dir):
os.makedirs(static_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(os.path.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": os.path.join('examples', ex.htmlfilename),
"thumb": os.path.join(ex.thumbfilename)})
shutil.copyfile(filename, os.path.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_html=indent(ex.html, 4))
with open(os.path.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = os.path.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example-gallery",
toctree=toctree,
contents=contents))
# write javascript include for front page
js_file = os.path.join(static_dir, 'banner_data.js')
with open(js_file, 'w') as js:
js.write(BANNER_JS_TEMPLATE.format(
banner_data=json.dumps(banner_data)))
def setup(app):
app.connect('builder-inited', main)
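# How this plugs into a Sphinx build (assumed sketch; the importable module
# name depends on how doc/sphinxext is added to sys.path in conf.py):
#
#     sys.path.insert(0, os.path.abspath('sphinxext'))
#     extensions = ['plot_generator']
#
# Sphinx then calls setup(app), which hooks main() onto 'builder-inited' so the
# example gallery is regenerated before the rest of the docs are built.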
|
bsd-3-clause
|
deepfield/ibis
|
ibis/client.py
|
1
|
12343
|
import abc
import six
from ibis.config import options
import ibis.util as util
import ibis.common as com
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.operations as ops
import ibis.sql.compiler as comp
class Client(object):
pass
class Query(object):
"""Abstraction for DML query execution to enable queries, progress,
cancellation and more (for backends supporting such functionality).
"""
def __init__(self, client, sql, **kwargs):
self.client = client
dml = getattr(sql, 'dml', sql)
self.expr = getattr(
dml, 'parent_expr', getattr(dml, 'table_set', None)
)
if not isinstance(sql, six.string_types):
self.compiled_sql = sql.compile()
else:
self.compiled_sql = sql
self.result_wrapper = getattr(dml, 'result_handler', None)
self.extra_options = kwargs
def execute(self):
# synchronous by default
with self.client._execute(self.compiled_sql, results=True) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
def _wrap_result(self, result):
if self.result_wrapper is not None:
result = self.result_wrapper(result)
return result
def _fetch(self, cursor):
raise NotImplementedError
def schema(self):
if isinstance(self.expr, (ir.TableExpr, ir.ExprList, sch.HasSchema)):
return self.expr.schema()
elif isinstance(self.expr, ir.ValueExpr):
return sch.schema([(self.expr.get_name(), self.expr.type())])
else:
raise ValueError('Expression with type {} does not have a '
'schema'.format(type(self.expr)))
class SQLClient(six.with_metaclass(abc.ABCMeta, Client)):
dialect = comp.Dialect
query_class = Query
table_class = ops.DatabaseTable
table_expr_class = ir.TableExpr
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
database
Parameters
----------
name : string
database : string, optional
Returns
-------
table : TableExpr
"""
qualified_name = self._fully_qualified_name(name, database)
schema = self._get_table_schema(qualified_name)
node = self.table_class(qualified_name, schema, self)
return self.table_expr_class(node)
@property
def current_database(self):
return self.con.database
def database(self, name=None):
"""
Create a Database object for a given database name that can be used for
exploring and manipulating the objects (tables, functions, views, etc.)
inside
Parameters
----------
name : string
Name of database
Returns
-------
database : Database
"""
# TODO: validate existence of database
if name is None:
name = self.current_database
return self.database_class(name, self)
def _fully_qualified_name(self, name, database):
# XXX
return name
def _execute(self, query, results=False):
cur = self.con.execute(query)
if results:
return cur
else:
cur.release()
def sql(self, query):
"""
Convert a SQL query to an Ibis table expression
        Parameters
        ----------
        query : string
          SQL query string
        Returns
-------
table : TableExpr
"""
# Get the schema by adding a LIMIT 0 on to the end of the query. If
# there is already a limit in the query, we find and remove it
limited_query = 'SELECT * FROM ({}) t0 LIMIT 0'.format(query)
schema = self._get_schema_using_query(limited_query)
return ops.SQLQueryResult(query, schema, self).to_expr()
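    # Illustration of the schema probe built by sql() above: for a user query
    #     SELECT a, b FROM t WHERE a > 10
    # the probe issued is
    #     SELECT * FROM (SELECT a, b FROM t WHERE a > 10) t0 LIMIT 0
    # which returns no rows but carries the full column metadata.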
def raw_sql(self, query, results=False):
"""
Execute a given query string. Could have unexpected results if the
query modifies the behavior of the session in a way unknown to Ibis; be
careful.
Parameters
----------
query : string
DML or DDL statement
results : boolean, default False
          Pass True if the query has a result set
Returns
-------
cur : ImpalaCursor if results=True, None otherwise
You must call cur.release() after you are finished using the cursor.
"""
return self._execute(query, results=results)
def execute(self, expr, params=None, limit='default', **kwargs):
"""
Compile and execute Ibis expression using this backend client
interface, returning results in-memory in the appropriate object type
Parameters
----------
expr : Expr
limit : int, default None
          For expressions yielding result sets; retrieve at most this number of
values/rows. Overrides any limit already set on the expression.
params : not yet implemented
Returns
-------
output : input type dependent
Table expressions: pandas.DataFrame
Array expressions: pandas.Series
Scalar expressions: Python scalar value
"""
query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
result = self._execute_query(query_ast, **kwargs)
return result
def _execute_query(self, dml, **kwargs):
query = self.query_class(self, dml, **kwargs)
return query.execute()
def compile(self, expr, params=None, limit=None):
"""
Translate expression to one or more queries according to backend target
Returns
-------
output : single query or list of queries
"""
query_ast = self._build_ast_ensure_limit(expr, limit, params=params)
return query_ast.compile()
def _build_ast_ensure_limit(self, expr, limit, params=None):
context = self.dialect.make_context(params=params)
query_ast = self._build_ast(expr, context)
# note: limit can still be None at this point, if the global
# default_limit is None
for query in reversed(query_ast.queries):
if (isinstance(query, comp.Select) and
not isinstance(expr, ir.ScalarExpr) and
query.table_set is not None):
if query.limit is None:
if limit == 'default':
query_limit = options.sql.default_limit
else:
query_limit = limit
if query_limit:
query.limit = {
'n': query_limit,
'offset': 0
}
elif limit is not None and limit != 'default':
query.limit = {'n': limit,
'offset': query.limit['offset']}
return query_ast
def explain(self, expr, params=None):
"""
Query for and return the query plan associated with the indicated
expression or SQL query.
Returns
-------
plan : string
"""
if isinstance(expr, ir.Expr):
context = self.dialect.make_context(params=params)
query_ast = self._build_ast(expr, context)
if len(query_ast.queries) > 1:
raise Exception('Multi-query expression')
query = query_ast.queries[0].compile()
else:
query = expr
statement = 'EXPLAIN {0}'.format(query)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return 'Query:\n{0}\n\n{1}'.format(util.indent(query, 2),
'\n'.join(result))
def _build_ast(self, expr, context):
# Implement in clients
raise NotImplementedError(type(self).__name__)
class QueryPipeline(object):
"""
Execute a series of queries, and capture any result sets generated
Note: No query pipelines have yet been implemented
"""
pass
def validate_backends(backends):
if not backends:
default = options.default_backend
if default is None:
raise com.IbisError(
'Expression depends on no backends, and found no default'
)
return [default]
if len(backends) > 1:
raise ValueError('Multiple backends found')
return backends
def execute(expr, limit='default', params=None, **kwargs):
backend, = validate_backends(list(find_backends(expr)))
return backend.execute(expr, limit=limit, params=params, **kwargs)
def compile(expr, limit=None, params=None, **kwargs):
backend, = validate_backends(list(find_backends(expr)))
return backend.compile(expr, limit=limit, params=params, **kwargs)
def find_backends(expr):
seen_backends = set()
stack = [expr.op()]
seen = set()
while stack:
node = stack.pop()
if node not in seen:
seen.add(node)
for arg in node.flat_args():
if isinstance(arg, Client):
if arg not in seen_backends:
yield arg
seen_backends.add(arg)
elif isinstance(arg, ir.Expr):
stack.append(arg.op())
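# A minimal sketch of how expression execution resolves its backend
# (illustration only, using the bare Client defined above; real backends are
# Client subclasses discovered by find_backends).
def _demo_backend_dispatch():
    backend = Client()
    # a single discovered backend is passed through unchanged
    assert validate_backends([backend]) == [backend]
    try:
        # with no backends found, this falls back to options.default_backend
        # and raises IbisError when no default is configured
        validate_backends([])
    except com.IbisError:
        pass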
class Database(object):
def __init__(self, name, client):
self.name = name
self.client = client
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.name)
def __dir__(self):
attrs = dir(type(self))
unqualified_tables = [self._unqualify(x) for x in self.tables]
return sorted(frozenset(attrs + unqualified_tables))
def __contains__(self, key):
return key in self.tables
@property
def tables(self):
return self.list_tables()
def __getitem__(self, key):
return self.table(key)
def __getattr__(self, key):
return self.table(key)
def _qualify(self, value):
return value
def _unqualify(self, value):
return value
def drop(self, force=False):
"""
Drop the database
Parameters
----------
        force : boolean, default False
          Drop any objects if they exist, and do not fail if the database does
          not exist
"""
self.client.drop_database(self.name, force=force)
def namespace(self, ns):
"""
Creates a derived Database instance for collections of objects having a
common prefix. For example, for tables fooa, foob, and fooc, creating
the "foo" namespace would enable you to reference those objects as a,
b, and c, respectively.
Returns
-------
ns : DatabaseNamespace
"""
return DatabaseNamespace(self, ns)
def table(self, name):
"""
Return a table expression referencing a table in this database
Returns
-------
table : TableExpr
"""
qualified_name = self._qualify(name)
return self.client.table(qualified_name, self.name)
def list_tables(self, like=None):
return self.client.list_tables(like=self._qualify_like(like),
database=self.name)
def _qualify_like(self, like):
return like
class DatabaseNamespace(Database):
def __init__(self, parent, namespace):
self.parent = parent
self.namespace = namespace
def __repr__(self):
return "{}(database={!r}, namespace={!r})".format(
type(self).__name__, self.name, self.namespace
)
@property
def client(self):
return self.parent.client
@property
def name(self):
return self.parent.name
def _qualify(self, value):
return self.namespace + value
def _unqualify(self, value):
return value.replace(self.namespace, '', 1)
def _qualify_like(self, like):
if like:
return self.namespace + like
else:
return '{0}*'.format(self.namespace)
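# An illustration of the namespace prefixing above. Only _qualify/_unqualify
# are exercised, so None stands in for the parent database here; in practice a
# namespace is created via Database.namespace('foo_').
def _demo_namespace_prefixing():
    ns = DatabaseNamespace(None, 'foo_')
    assert ns._qualify('bar') == 'foo_bar'
    assert ns._unqualify('foo_bar') == 'bar'
    assert ns._qualify_like(None) == 'foo_*'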
class DatabaseEntity(object):
pass
class View(DatabaseEntity):
def drop(self):
pass
|
apache-2.0
|
jradavenport/ThirdWay
|
thirdway/lightcurve.py
|
1
|
23719
|
"""
Tool for taking the raw data from MAST and producing cleaned light curves
"""
from __future__ import absolute_import, print_function
from astropy.io import fits
from astropy.time import Time
import astropy.units as u
import os
import numpy as np
import matplotlib.pyplot as plt
import shutil
import batman
from scipy import optimize
def kepler17_params_jrad():
"""
Assumed transit parameters for Kepler-17 from JRAD:
# machine readable file w/ parameters used for Kepler 17
# references:
# [1] http://adsabs.harvard.edu/abs/2011ApJS..197...14D
1.4857108 Planet orbital period (days) [1]
2455185.678035 Transit Ephem (BJD_utc) [1]
0.09485 Transit duration (days) [1]
12.25817669188 Star Rotation Period (days) [me, llc]
0.13413993 Rp/Rstar [Leslie] (STSP wants this squared)
0.1045441 impact parameter [Leslie]
88.94560 orbital inclination [Leslie]
5800 Teff (star) [1]
1.113697 Stellar Density [Leslie]
0.59984, -0.165775, 0.6876732, -0.349944 Limb darkening
"""
params = batman.TransitParams()
params.t0 = 2455185.678035 # time of inferior conjunction
params.per = 1.4857108 # orbital period
params.rp = 0.13413993 # planet radius (in units of stellar radii)
b = 0.1045441 # impact parameter
dur = 0.09485 # transit duration
params.inc = 88.94560 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
    a_over_Rs, inclination_deg = T14b_to_aRsi(params.per, dur, b, params.rp, params.ecc, params.w)
params.a = a_over_Rs # semi-major axis (in units of stellar radii)
params.u = [0.59984, -0.165775, 0.6876732, -0.349944] # limb darkening coefficients
params.limb_dark = "nonlinear" # limb darkening model
#params.duration = params.per/np.pi*np.arcsin(np.sqrt((1+params.rp)**2 + b**2)
# / np.sin(inclination)/params.a)
params.duration = dur
return params
def kepler17_params_db():
"""
Assumed transit parameters for Kepler-17 from exoplanets.org
http://exoplanets.org/detail/Kepler-17_b
"""
params = batman.TransitParams()
params.t0 = 2455185.678035 # time of inferior conjunction
params.per = 1.4857108 # orbital period
params.rp = 0.13031 # planet radius (in units of stellar radii)
b = 0.268 # impact parameter
dur = 0.09485 # transit duration
params.inc = 87.2 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
a_over_Rs, inclination_deg = T14b_to_aRsi(params.per, dur, b, params.rp, params.ecc, params.w)
params.a = a_over_Rs # semi-major axis (in units of stellar radii)
params.u = [0.405, 0.262] # limb darkening coefficients
params.limb_dark = "quadratic" # limb darkening model
#params.duration = params.per/np.pi*np.arcsin(np.sqrt((1+params.rp)**2 + b**2)
# / np.sin(inclination)/params.a)
params.duration = dur
return params
def T14b_to_aRsi(P, T14, b, RpRs, eccentricity, omega):
'''
Convert from duration and impact param to a/Rs and inclination
'''
C = np.sqrt(1 - eccentricity**2)/(1 + eccentricity*np.sin(np.radians(omega)))
i = np.arctan(np.sqrt((1 + RpRs)**2 - b**2)/(b*np.sin(T14*np.pi/(P*C))))
aRs = b/np.cos(i)
return aRs, np.degrees(i)
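# A worked example (illustration only) using the exoplanets.org Kepler-17 b
# numbers adopted in kepler17_params_db(): P = 1.4857108 d, T14 = 0.09485 d,
# b = 0.268, Rp/Rs = 0.13031, circular orbit. This gives roughly a/Rs ~ 5.5 and
# i ~ 87.2 deg, consistent with the inclination used there.
def _demo_T14b_to_aRsi():
    return T14b_to_aRsi(1.4857108, 0.09485, 0.268, 0.13031, 0.0, 90.0)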
def generate_lc_depth(times, depth, init_params):
exp_time = (1*u.min).to(u.day).value
init_params.rp = np.sqrt(depth)
m = batman.TransitModel(init_params, times, supersample_factor=7,
exp_time=exp_time)
model_flux = m.light_curve(init_params)
return model_flux
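# A short usage sketch (requires the batman package imported above). The depth
# is the fractional flux drop, and generate_lc_depth overwrites init_params.rp
# with sqrt(depth) before evaluating the model.
def _demo_generate_lc_depth():
    params = kepler17_params_db()
    times = params.t0 + np.linspace(-0.15, 0.15, 500)
    return generate_lc_depth(times, params.rp ** 2, params)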
class LightCurve(object):
"""
Container object for light curves
"""
def __init__(self, times=None, fluxes=None, errors=None, quarters=None, name=None):
#if len(times) < 1:
# raise ValueError("Input `times` have no length.")
if (isinstance(times[0], Time) and isinstance(times, np.ndarray)):
times = Time(times)
elif not isinstance(times, Time):
times = Time(times, format='jd')
self.times = times
self.fluxes = fluxes
if self.times is not None and errors is None:
errors = np.zeros_like(self.fluxes) - 1
self.errors = errors
if self.times is not None and quarters is None:
quarters = np.zeros_like(self.fluxes) - 1
self.quarters = quarters
self.name = name
def plot(self, params, ax=None, quarter=None, show=True, phase=False,
plot_kwargs={'color':'b', 'marker':'o', 'lw':0}):
"""
Plot light curve
"""
if quarter is not None:
if hasattr(quarter, '__len__'):
mask = np.zeros_like(self.fluxes).astype(bool)
for q in quarter:
mask |= self.quarters == q
else:
mask = self.quarters == quarter
else:
mask = np.ones_like(self.fluxes).astype(bool)
if ax is None:
ax = plt.gca()
if phase:
x = (self.times.jd - params.t0)/params.per % 1
x[x > 0.5] -= 1
else:
x = self.times.jd
ax.plot(x[mask], self.fluxes[mask],
**plot_kwargs)
ax.set(xlabel='Time' if not phase else 'Phase',
ylabel='Flux', title=self.name)
if show:
plt.show()
def save_to(self, path, overwrite=False, for_stsp=False):
"""
Save times, fluxes, errors to new directory ``dirname`` in ``path``
"""
dirname = self.name
output_path = os.path.join(path, dirname)
self.times = Time(self.times)
if not for_stsp:
if os.path.exists(output_path) and overwrite:
shutil.rmtree(output_path)
if not os.path.exists(output_path):
os.mkdir(output_path)
for attr in ['times_jd', 'fluxes', 'errors', 'quarters']:
np.savetxt(os.path.join(path, dirname, '{0}.txt'.format(attr)),
getattr(self, attr))
else:
if not os.path.exists(output_path) or overwrite:
attrs = ['times_jd', 'fluxes', 'errors']
output_array = np.zeros((len(self.fluxes), len(attrs)), dtype=float)
for i, attr in enumerate(attrs):
output_array[:, i] = getattr(self, attr)
np.savetxt(os.path.join(path, dirname+'.txt'), output_array)
@classmethod
def from_raw_fits(cls, fits_paths, name=None):
"""
Load FITS files from MAST into the LightCurve object
"""
fluxes = []
errors = []
times = []
quarter = []
for path in fits_paths:
data = fits.getdata(path)
header = fits.getheader(path)
times.append(data['TIME'] + 2454833.0)
errors.append(data['PDCSAP_FLUX_ERR'])
fluxes.append(data['PDCSAP_FLUX'])
quarter.append(len(data['TIME'])*[header['QUARTER']])
times, fluxes, errors, quarter = [np.concatenate(i)
for i in [times, fluxes, errors, quarter]]
mask_nans = np.zeros_like(fluxes).astype(bool)
for attr in [times, fluxes, errors]:
mask_nans |= np.isnan(attr)
        times, fluxes, errors, quarter = [attr[~mask_nans]
for attr in [times, fluxes, errors, quarter]]
return LightCurve(times, fluxes, errors, quarters=quarter, name=name)
@classmethod
def from_dir(cls, path, for_stsp=False):
"""Load light curve from numpy save files in ``dir``"""
if not for_stsp:
times, fluxes, errors, quarters = [np.loadtxt(os.path.join(path, '{0}.txt'.format(attr)))
for attr in ['times_jd', 'fluxes', 'errors', 'quarters']]
else:
quarters = None
times, fluxes, errors = np.loadtxt(path, unpack=True)
if os.sep in path:
name = path.split(os.sep)[-1]
else:
name = path
if name.endswith('.txt'):
name = name[:-4]
return cls(times, fluxes, errors, quarters=quarters, name=name)
def normalize_each_quarter(self, rename=None, polynomial_order=2, plots=False):
"""
Use 2nd order polynomial fit to each quarter to normalize the data
"""
quarter_inds = list(set(self.quarters))
quarter_masks = [quarter == self.quarters for quarter in quarter_inds]
for quarter_mask in quarter_masks:
polynomial = np.polyfit(self.times[quarter_mask].jd,
self.fluxes[quarter_mask], polynomial_order)
scaling_term = np.polyval(polynomial, self.times[quarter_mask].jd)
self.fluxes[quarter_mask] /= scaling_term
self.errors[quarter_mask] /= scaling_term
if plots:
plt.plot(self.times[quarter_mask], self.fluxes[quarter_mask])
plt.show()
if rename is not None:
self.name = rename
def mask_out_of_transit(self, params, oot_duration_fraction=0.25, flip=False):
"""
Mask out the out-of-transit light curve based on transit parameters
"""
# Fraction of one duration to capture out of transit
phased = (self.times.jd - params.t0) % params.per
near_transit = ((phased < params.duration*(0.5 + oot_duration_fraction)) |
(phased > params.per - params.duration*(0.5 + oot_duration_fraction)))
if flip:
            near_transit = ~near_transit
sort_by_time = np.argsort(self.times[near_transit].jd)
return dict(times=self.times[near_transit][sort_by_time],
fluxes=self.fluxes[near_transit][sort_by_time],
errors=self.errors[near_transit][sort_by_time],
quarters=self.quarters[near_transit][sort_by_time])
def mask_in_transit(self, params, oot_duration_fraction=0.25):
return self.mask_out_of_transit(params, oot_duration_fraction=oot_duration_fraction,
flip=True)
def get_transit_light_curves(self, params, plots=False):
"""
For a light curve with transits only (returned by get_only_transits),
split up the transits into their own light curves, return a list of
`TransitLightCurve` objects
"""
time_diffs = np.diff(sorted(self.times.jd))
diff_between_transits = params.per/2.
split_inds = np.argwhere(time_diffs > diff_between_transits) + 1
if len(split_inds) > 1:
split_ind_pairs = [[0, split_inds[0][0]]]
split_ind_pairs.extend([[split_inds[i][0], split_inds[i+1][0]]
for i in range(len(split_inds)-1)])
split_ind_pairs.extend([[split_inds[-1], len(self.times)]])
transit_light_curves = []
counter = -1
for start_ind, end_ind in split_ind_pairs:
counter += 1
if plots:
plt.plot(self.times.jd[start_ind:end_ind],
self.fluxes[start_ind:end_ind], '.-')
parameters = dict(times=self.times[start_ind:end_ind],
fluxes=self.fluxes[start_ind:end_ind],
errors=self.errors[start_ind:end_ind],
quarters=self.quarters[start_ind:end_ind],
name=counter)
transit_light_curves.append(TransitLightCurve(**parameters))
if plots:
plt.show()
else:
transit_light_curves = []
return transit_light_curves
def get_available_quarters(self):
return list(set(self.quarters))
def get_quarter(self, quarter):
this_quarter = self.quarters == quarter
return LightCurve(times=self.times[this_quarter],
fluxes=self.fluxes[this_quarter],
errors=self.errors[this_quarter],
quarters=self.quarters[this_quarter],
name=self.name + '_quarter_{0}'.format(quarter))
@property
def times_jd(self):
return self.times.jd
def save_split_at_stellar_rotations(self, path, stellar_rotation_period,
overwrite=False):
dirname = self.name
output_path = os.path.join(path, dirname)
self.times = Time(self.times)
        if os.path.exists(output_path) and overwrite:
            shutil.rmtree(output_path)
        if not os.path.exists(output_path):
            os.mkdir(output_path)
stellar_rotation_phase = ((self.times.jd - self.times.jd[0])*u.day %
stellar_rotation_period ) / stellar_rotation_period
phase_wraps = np.argwhere(stellar_rotation_phase[:-1] >
stellar_rotation_phase[1:])
split_times = np.split(self.times.jd, phase_wraps)
split_fluxes = np.split(self.fluxes, phase_wraps)
split_errors = np.split(self.errors, phase_wraps)
split_quarters = np.split(self.quarters, phase_wraps)
header = "JD Flux Uncertainty Quarter"
for i, t, f, e, q in zip(range(len(split_times)), split_times,
split_fluxes, split_errors, split_quarters):
np.savetxt(os.path.join(output_path, 'rotation{:02d}.txt'.format(i)),
np.vstack([t, f, e, q]).T, header=header)
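# A minimal construction sketch (illustration only). LightCurve needs arrays of
# times (JD) and fluxes; errors and quarters default to sentinel arrays of -1.
def _demo_lightcurve_phase():
    params = kepler17_params_db()
    times = params.t0 + np.linspace(0.0, 10.0, 2000)
    lc = LightCurve(times=times, fluxes=np.ones_like(times), name='demo')
    # orbital phase in [-0.5, 0.5), as used by LightCurve.plot(phase=True)
    phase = (lc.times.jd - params.t0) / params.per % 1
    phase[phase > 0.5] -= 1
    return lc, phase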
class TransitLightCurve(LightCurve):
"""
Container for a single transit light curve
"""
def __init__(self, times=None, fluxes=None, errors=None, quarters=None, name=None):
if isinstance(times[0], Time) and isinstance(times, np.ndarray):
times = Time(times)
elif not isinstance(times, Time):
times = Time(times, format='jd')
self.times = times
self.fluxes = fluxes
self.errors = errors
self.quarters = quarters
self.name = name
self.rescaled = False
def fit_linear_baseline(self, params, cadence=1*u.min, return_near_transit=False,
plots=False):
"""
Find OOT portions of transit light curve using similar method to
`LightCurve.mask_out_of_transit`, fit linear baseline to OOT
"""
cadence_buffer = cadence.to(u.day).value
get_oot_duration_fraction = 0
phased = (self.times.jd - params.t0) % params.per
near_transit = ((phased < params.duration*(0.5 + get_oot_duration_fraction) + cadence_buffer) |
(phased > params.per - params.duration*(0.5 + get_oot_duration_fraction) - cadence_buffer))
# Remove linear baseline trend
order = 1
        linear_baseline = np.polyfit(self.times.jd[~near_transit],
                                     self.fluxes[~near_transit], order)
linear_baseline_fit = np.polyval(linear_baseline, self.times.jd)
if plots:
fig, ax = plt.subplots(1, 2, figsize=(15,6))
ax[0].axhline(1, ls='--', color='k')
ax[0].plot(self.times.jd, linear_baseline_fit, 'r')
ax[0].plot(self.times.jd, self.fluxes, 'bo')
plt.show()
if return_near_transit:
return linear_baseline, near_transit
else:
return linear_baseline
def remove_linear_baseline(self, params, plots=False, cadence=1*u.min):
"""
Find OOT portions of transit light curve using similar method to
`LightCurve.mask_out_of_transit`, fit linear baseline to OOT,
divide whole light curve by that fit.
"""
linear_baseline, near_transit = self.fit_linear_baseline(params, cadence=cadence,
return_near_transit=True)
linear_baseline_fit = np.polyval(linear_baseline, self.times.jd)
self.fluxes = self.fluxes/linear_baseline_fit
self.errors = self.errors/linear_baseline_fit
if plots:
fig, ax = plt.subplots(1, 2, figsize=(15,6))
ax[0].axhline(1, ls='--', color='k')
ax[0].plot(self.times.jd, self.fluxes, 'o')
#ax[0].plot(self.times.jd[near_transit], self.fluxes[near_transit], 'ro')
ax[0].set_title('before trend removal')
ax[1].set_title('after trend removal')
ax[1].axhline(1, ls='--', color='k')
ax[1].plot(self.times.jd, self.fluxes, 'o')
plt.show()
def scale_by_baseline(self, linear_baseline_params):
if not self.rescaled:
scaling_vector = np.polyval(linear_baseline_params, self.times.jd)
self.fluxes *= scaling_vector
self.errors *= scaling_vector
self.rescaled = True
def fiducial_transit_fit(self, params, plots=False,
model=generate_lc_depth):
# Determine cadence:
# typical_time_diff = np.median(np.diff(self.times.jd))*u.day
# exp_long = 30*u.min
# exp_short = 1*u.min
# exp_time = (exp_long if np.abs(typical_time_diff - exp_long) < 1*u.min
# else exp_short).to(u.day).value
# [t0, depth, dur, b]
init_depth = params.rp**2
initial_parameters = [init_depth]
def minimize_this(p, times, fluxes, errors):
            return np.sum(((model(times, p[0], params) - fluxes)/errors)**2)
fit_result = optimize.fmin(minimize_this, initial_parameters,
args=(self.times.jd, self.fluxes, self.errors),
disp=False)
        p = fit_result
        init_model = model(self.times.jd, initial_parameters[0], params)
        model_flux = model(self.times.jd, p[0], params)
if plots:
fig, ax = plt.subplots(2, 1, figsize=(8, 12), sharex=True)
#ax[0].plot(self.times.jd, init_model, 'g')
ax[0].errorbar(self.times.jd, self.fluxes, self.errors, fmt='.', color='k')
ax[0].plot(self.times.jd, model_flux, 'r')
ax[0].set(ylabel='Flux')
ax[1].errorbar(self.times.jd, self.fluxes - model_flux, fmt='.', color='k')
ax[1].axhline(0, color='r')
#plt.show()
chi2 = np.sum((self.fluxes - model_flux)**2/self.errors**2)/(len(self.fluxes))
return p, chi2
@classmethod
def from_dir(cls, path):
"""Load light curve from numpy save files in ``path``"""
times, fluxes, errors, quarters = [np.loadtxt(os.path.join(path, '{0}.txt'.format(attr)))
for attr in ['times_jd', 'fluxes', 'errors', 'quarters']]
if os.sep in path:
name = path.split(os.sep)[-1]
else:
name = path
return cls(times, fluxes, errors, quarters=quarters, name=name)
def combine_short_and_long_cadence(short_cadence_transit_light_curves_list,
long_cadence_transit_light_curves_list,
long_cadence_light_curve, name=None):
"""
Find the linear baseline in the out of transit portions of the long cadence
light curves in ``long_cadence_transit_light_curves_list``. Scale each
short cadence light curve by that scaling factor.
Cut out all transits from the ``long_cadence_light_curve``, and leave
enough time before/after the first short-cadence points near transit to
ensure no overlapping exposure times.
Insert the normalized short cadence light curves from
``short_cadence_light_curve_list`` into the time series.
"""
# Find linear baseline near transits in long cadence
linear_baseline_params = [transit.fit_linear_baseline(cadence=30*u.min)
for transit in long_cadence_transit_light_curves_list]
# Find the corresponding short cadence transit for each long cadence baseline
# fit, renormalize that short cadence transit accordingly
scaled_short_transits = []
for short_transit in short_cadence_transit_light_curves_list:
for long_transit, baseline_params in zip(long_cadence_transit_light_curves_list, linear_baseline_params):
if abs(long_transit.times.jd.mean() - short_transit.times.jd.mean()) < 0.1:
short_transit.scale_by_baseline(baseline_params)
scaled_short_transits.append(short_transit)
# Break out all times, fluxes, errors quarters, and weed out those from the
# long cadence light curve that overlap with the short cadence data
all_times = long_cadence_light_curve.times.jd
all_fluxes = long_cadence_light_curve.fluxes
all_errors = long_cadence_light_curve.errors
all_quarters = long_cadence_light_curve.quarters
remove_mask = np.zeros(len(all_times), dtype=bool)
short_cadence_exp_time = (30*u.min).to(u.day).value
for scaled_short_transit in scaled_short_transits:
min_t = scaled_short_transit.times.jd.min() - short_cadence_exp_time
max_t = scaled_short_transit.times.jd.max() + short_cadence_exp_time
overlapping_times = (all_times > min_t) & (all_times < max_t)
remove_mask |= overlapping_times
remove_indices = np.arange(len(all_times))[remove_mask]
all_times, all_fluxes, all_errors, all_quarters = [np.delete(arr, remove_indices)
for arr in [all_times, all_fluxes, all_errors, all_quarters]]
# Insert the renormalized short cadence data into the pruned long cadence
# data, return as `LightCurve` object
all_times = np.concatenate([all_times] + [t.times.jd for t in scaled_short_transits])
all_fluxes = np.concatenate([all_fluxes] + [t.fluxes for t in scaled_short_transits])
all_errors = np.concatenate([all_errors] + [t.errors for t in scaled_short_transits])
all_quarters = np.concatenate([all_quarters] + [t.quarters for t in scaled_short_transits])
# Sort by times
time_sort = np.argsort(all_times)
all_times, all_fluxes, all_errors, all_quarters = [arr[time_sort]
for arr in [all_times, all_fluxes, all_errors, all_quarters]]
return LightCurve(times=all_times, fluxes=all_fluxes, errors=all_errors,
quarters=all_quarters, name=name)
def concatenate_transit_light_curves(light_curve_list, name=None):
"""
Combine multiple transit light curves into one `TransitLightCurve` object
"""
times = []
fluxes = []
errors = []
quarters = []
for light_curve in light_curve_list:
times.append(light_curve.times.jd)
fluxes.append(light_curve.fluxes)
errors.append(light_curve.errors)
quarters.append(light_curve.quarters)
times, fluxes, errors, quarters = [np.concatenate(i)
for i in [times, fluxes, errors, quarters]]
times = Time(times, format='jd')
return TransitLightCurve(times=times, fluxes=fluxes, errors=errors,
quarters=quarters, name=name)
|
mit
|
bgroveben/python3_machine_learning_projects
|
oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_rbf_svm_parameters.py
|
5
|
1178
|
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from .plot_2d_separator import plot_2d_separator
from .tools import make_handcrafted_dataset
from .plot_helpers import discrete_scatter
def plot_svm(log_C, log_gamma, ax=None):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
if ax is None:
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
# plot support vectors
sv = svm.support_vectors_
# class labels of support vectors are given by the sign of the dual coefficients
sv_labels = svm.dual_coef_.ravel() > 0
discrete_scatter(sv[:, 0], sv[:, 1], sv_labels, s=15, markeredgewidth=3, ax=ax)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
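# A brief usage sketch (illustration only). The sliders feed log10 values, so
# plot_svm(0, 0) corresponds to C = 1 and gamma = 1 on the toy dataset.
def _demo_plot_svm():
    fig, ax = plt.subplots()
    plot_svm(log_C=0, log_gamma=0, ax=ax)
    return fig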
|
mit
|
liuchengtian/CS523
|
steerstats/tools/plotting/animating/anim_particles.py
|
8
|
5311
|
"""
Animation of Elastic collisions with Gravity
author: Jake Vanderplas
email: [email protected]
website: http://jakevdp.github.com
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
class ParticleBox:
"""Orbits class
init_state is an [N x 4] array, where N is the number of particles:
[[x1, y1, vx1, vy1],
[x2, y2, vx2, vy2],
... ]
bounds is the size of the box: [xmin, xmax, ymin, ymax]
"""
def __init__(self,
init_state = [[1, 0, 0, -1],
[-0.5, 0.5, 0.5, 0.5],
[-0.5, -0.5, -0.5, 0.5]],
bounds = [-2, 2, -2, 2],
size = 0.04,
M = 0.05,
G = 9.8):
self.init_state = np.asarray(init_state, dtype=float)
self.M = M * np.ones(self.init_state.shape[0])
self.size = size
self.state = self.init_state.copy()
self.time_elapsed = 0
self.bounds = bounds
self.G = G
def step(self, dt):
"""step once by dt seconds"""
self.time_elapsed += dt
# update positions
self.state[:, :2] += dt * self.state[:, 2:]
# find pairs of particles undergoing a collision
D = squareform(pdist(self.state[:, :2]))
ind1, ind2 = np.where(D < 2 * self.size)
unique = (ind1 < ind2)
ind1 = ind1[unique]
ind2 = ind2[unique]
# update velocities of colliding pairs
for i1, i2 in zip(ind1, ind2):
# mass
m1 = self.M[i1]
m2 = self.M[i2]
# location vector
r1 = self.state[i1, :2]
r2 = self.state[i2, :2]
# velocity vector
v1 = self.state[i1, 2:]
v2 = self.state[i2, 2:]
# relative location & velocity vectors
r_rel = r1 - r2
v_rel = v1 - v2
# momentum vector of the center of mass
v_cm = (m1 * v1 + m2 * v2) / (m1 + m2)
# collisions of spheres reflect v_rel over r_rel
rr_rel = np.dot(r_rel, r_rel)
vr_rel = np.dot(v_rel, r_rel)
v_rel = 2 * r_rel * vr_rel / rr_rel - v_rel
# assign new velocities
self.state[i1, 2:] = v_cm + v_rel * m2 / (m1 + m2)
self.state[i2, 2:] = v_cm - v_rel * m1 / (m1 + m2)
# check for crossing boundary
crossed_x1 = (self.state[:, 0] < self.bounds[0] + self.size)
crossed_x2 = (self.state[:, 0] > self.bounds[1] - self.size)
crossed_y1 = (self.state[:, 1] < self.bounds[2] + self.size)
crossed_y2 = (self.state[:, 1] > self.bounds[3] - self.size)
self.state[crossed_x1, 0] = self.bounds[0] + self.size
self.state[crossed_x2, 0] = self.bounds[1] - self.size
self.state[crossed_y1, 1] = self.bounds[2] + self.size
self.state[crossed_y2, 1] = self.bounds[3] - self.size
self.state[crossed_x1 | crossed_x2, 2] *= -1
self.state[crossed_y1 | crossed_y2, 3] *= -1
# add gravity
self.state[:, 3] -= self.M * self.G * dt
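# A small sanity-check sketch (not used by the animation below). With gravity
# switched off and the particles well inside the box, a single step should
# conserve total momentum and kinetic energy whatever collisions occur.
def _demo_conservation_check():
    demo = ParticleBox(init_state=-0.5 + np.random.random((10, 4)), G=0.0)
    p0 = (demo.M[:, None] * demo.state[:, 2:]).sum(axis=0)
    ke0 = 0.5 * (demo.M * (demo.state[:, 2:] ** 2).sum(axis=1)).sum()
    demo.step(1. / 30)
    p1 = (demo.M[:, None] * demo.state[:, 2:]).sum(axis=0)
    ke1 = 0.5 * (demo.M * (demo.state[:, 2:] ** 2).sum(axis=1)).sum()
    return np.allclose(p0, p1), np.allclose(ke0, ke1)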
#------------------------------------------------------------
# set up initial state
np.random.seed(0)
init_state = -0.5 + np.random.random((50, 4))
init_state[:, :2] *= 3.9
box = ParticleBox(init_state, size=0.04)
dt = 1. / 30 # 30fps
#------------------------------------------------------------
# set up figure and animation
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False,
xlim=(-3.2, 3.2), ylim=(-2.4, 2.4))
# particles holds the locations of the particles
particles, = ax.plot([], [], 'bo', ms=6)
# rect is the box edge
rect = plt.Rectangle(box.bounds[::2],
box.bounds[1] - box.bounds[0],
box.bounds[3] - box.bounds[2],
ec='none', lw=2, fc='none')
ax.add_patch(rect)
def init():
"""initialize animation"""
global box, rect
particles.set_data([], [])
rect.set_edgecolor('none')
return particles, rect
def animate(i):
"""perform animation step"""
global box, rect, dt, ax, fig
box.step(dt)
ms = int(fig.dpi * 2 * box.size * fig.get_figwidth()
/ np.diff(ax.get_xbound())[0])
# update pieces of the animation
rect.set_edgecolor('k')
particles.set_data(box.state[:, 0], box.state[:, 1])
particles.set_markersize(ms)
return particles, rect
ani = animation.FuncAnimation(fig, animate, frames=600,
interval=10, blit=True, init_func=init)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#ani.save('particle_box.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
|
gpl-3.0
|
robgjansen/braids-tor-simulator
|
scripts/Logparser.py
|
1
|
22139
|
'''
Copyright 2010 Rob Jansen
This file is part of braids-tor-simulator.
braids-tor-simulator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
braids-tor-simulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with braids-tor-simulator. If not, see <http://www.gnu.org/licenses/>.
Created on Jan 9, 2010
$Id: Logparser.py 948 2010-10-12 03:33:57Z jansen $
@author: rob
'''
import gzip, csv#,sys
from matplotlib.pylab import figure, savefig, show, legend, plot, xlabel, ylabel, xlim, ylim
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage)
parser.add_option("-r", "--read", dest="gzfilenames", nargs=5,
help="full read data from the 5 FILENAMES in order of tor-0-20-50-80, output csv file that can then be graphed")
parser.add_option("-g", "--graph", dest="csvfilename", nargs=1,
help="graph data from FILENAME, parsed from a full read")
parser.add_option("-l", "--legacy", dest="filename",
help="read data from a single FILENAME and graph immediately")
parser.add_option("-s", "--save", action="store_true", dest="save_figs", default=False,
help="save figures if they are generated")
parser.add_option("-v", "--noview", action="store_false", dest="view_figs", default=True,
help="do NOT show figures if they are generated")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.error("incorrect number of arguments")
# simulation warmup time - 10 minutes
warmup = 0#600000
bt_rtt_d = {}
web_rtt_d = {}
delay_d = {}
heartbeat_l = []
current_minute = 0;
def parse_to_csv():
#import files
tor = options.gzfilenames[0]
zero = options.gzfilenames[1]
twenty = options.gzfilenames[2]
fifty = options.gzfilenames[3]
eighty = options.gzfilenames[4]
# parse all data, use temp for data we do not need
read_file(tor)
tor_rtt_x = get_web_rtt_CDF("NormalData")
temp, tor_util_x, temp, temp = get_bt_rtt_CDF_weighted()
x, tor_memory_y = get_memory_over_time()
temp, tor_dgs_total_y, temp, temp, temp = get_datagrams_over_time()
temp, tor_mgs_total_y, temp, temp, temp = get_messages_over_time()
read_file(zero)
zero_rtt_normal_x = get_web_rtt_CDF("NormalData")
zero_rtt_lowlatency_x = get_web_rtt_CDF("LowLatencyData")
temp, zero_util_btclient_x, temp, zero_util_btrelay_x = get_bt_rtt_CDF_weighted()
read_file(twenty)
twenty_rtt_normal_x = get_web_rtt_CDF("NormalData")
twenty_rtt_lowlatency_x = get_web_rtt_CDF("LowLatencyData")
temp, twenty_util_btclient_x, temp, twenty_util_btrelay_x = get_bt_rtt_CDF_weighted()
read_file(fifty)
fifty_rtt_normal_x = get_web_rtt_CDF("NormalData")
fifty_rtt_lowlatency_x = get_web_rtt_CDF("LowLatencyData")
temp, fifty_util_btclient_x, temp, fifty_util_btrelay_x = get_bt_rtt_CDF_weighted()
temp, fifty_memory_y = get_memory_over_time()
temp, fifty_dgs_total_y, fifty_dgs_ht_y, fifty_dgs_ll_y, fifty_dgs_n_y = get_datagrams_over_time()
temp, fifty_mgs_total_y, fifty_mgs_ht_y, fifty_mgs_ll_y, fifty_mgs_n_y = get_messages_over_time()
read_file(eighty)
eighty_rtt_normal_x = get_web_rtt_CDF("NormalData")
eighty_rtt_lowlatency_x = get_web_rtt_CDF("LowLatencyData")
temp, eighty_util_btclient_x, temp, eighty_util_btrelay_x = get_bt_rtt_CDF_weighted()
#free the memory
reset()
# output graph data into csv to avoid parsing it again
writer = csv.writer(open('graph_data.csv', 'w'))
writer.writerows([tor_rtt_x, tor_util_x])
writer.writerows([zero_rtt_normal_x, zero_rtt_lowlatency_x, zero_util_btclient_x, zero_util_btrelay_x])
writer.writerows([twenty_rtt_normal_x, twenty_rtt_lowlatency_x, twenty_util_btclient_x, twenty_util_btrelay_x])
writer.writerows([fifty_rtt_normal_x, fifty_rtt_lowlatency_x, fifty_util_btclient_x, fifty_util_btrelay_x])
writer.writerows([eighty_rtt_normal_x, eighty_rtt_lowlatency_x, eighty_util_btclient_x, eighty_util_btrelay_x])
writer.writerows([x, tor_memory_y, fifty_memory_y])
writer.writerows([tor_dgs_total_y, fifty_dgs_total_y, fifty_dgs_ht_y, fifty_dgs_ll_y, fifty_dgs_n_y])
writer.writerows([tor_mgs_total_y, fifty_mgs_total_y, fifty_mgs_ht_y, fifty_mgs_ll_y, fifty_mgs_n_y])
#config for how many markers to show
# (start_index, stride)
util_markevery_all = (0, 500)
util_markevery_one = (0, 200)
rtt_markevery = (0, 10000)
marker_size = 5
line_width = 2.0
mark_every = (0, 0)
def graph_from_csv():
global mark_every
# properties for the graphs, keep consistent
# label, linestyle, color, marker
tor_props = ['Tor', '-', 'k', 'None']
zero_props = ['0%', '--', 'r', '^']
twenty_props = ['20%', ':', 'b', 's']
fifty_props = ['50%', '--', 'g', 'o']
eighty_props = ['80%', ':', 'm', '*']
#grab already parsed data from the csv
reader = csv.reader(open(options.csvfilename, "rb"))
tor_rtt_x = to_float(reader.next())
tor_util_x = to_float(reader.next())
zero_rtt_normal_x = to_float(reader.next())
zero_rtt_lowlatency_x = to_float(reader.next())
zero_util_btclient_x = to_float(reader.next())
zero_util_btrelay_x = to_float(reader.next())
twenty_rtt_normal_x = to_float(reader.next())
twenty_rtt_lowlatency_x = to_float(reader.next())
twenty_util_btclient_x = to_float(reader.next())
twenty_util_btrelay_x = to_float(reader.next())
fifty_rtt_normal_x = to_float(reader.next())
fifty_rtt_lowlatency_x = to_float(reader.next())
fifty_util_btclient_x = to_float(reader.next())
fifty_util_btrelay_x = to_float(reader.next())
eighty_rtt_normal_x = to_float(reader.next())
eighty_rtt_lowlatency_x = to_float(reader.next())
eighty_util_btclient_x = to_float(reader.next())
eighty_util_btrelay_x = to_float(reader.next())
time = to_float(reader.next())
tor_memory = to_float(reader.next())
fifty_memory = to_float(reader.next())
tor_dgs_total = to_float(reader.next())
fifty_dgs_total_y = to_float(reader.next())
fifty_dgs_ht_y = to_float(reader.next())
fifty_dgs_ll_y = to_float(reader.next())
fifty_dgs_n_y = to_float(reader.next())
tor_mgs_total = to_float(reader.next())
fifty_mgs_total_y = to_float(reader.next())
fifty_mgs_ht_y = to_float(reader.next())
fifty_mgs_ll_y = to_float(reader.next())
fifty_mgs_n_y = to_float(reader.next())
#now draw the graphs
mark_every = rtt_markevery
#rtt low latency
figure(figsize=(6, 4))
do_plot_CDF(tor_rtt_x, tor_props)
do_plot_CDF(zero_rtt_lowlatency_x, zero_props)
do_plot_CDF(twenty_rtt_lowlatency_x, twenty_props)
do_plot_CDF(fifty_rtt_lowlatency_x, fifty_props)
do_plot_CDF(eighty_rtt_lowlatency_x, eighty_props)
ylim(0, 1)
xlim(0, 10)
ylabel("Cumulative Fraction")
xlabel("Paid Webpage Download Time (s)")
do_legend2()
if options.save_figs: savefig('ll_rtt.png')
#rtt normal
figure(figsize=(6, 4))
do_plot_CDF(tor_rtt_x, tor_props)
do_plot_CDF(zero_rtt_normal_x, zero_props)
do_plot_CDF(twenty_rtt_normal_x, twenty_props)
do_plot_CDF(fifty_rtt_normal_x, fifty_props)
do_plot_CDF(eighty_rtt_normal_x, eighty_props)
ylim(0, 1)
xlim(0, 10)
ylabel("Cumulative Fraction")
xlabel("Unpaid Webpage Download Time (s)")
do_legend2()
if options.save_figs: savefig('n_rtt.png')
mark_every = util_markevery_all
#util high throughput
figure(figsize=(6, 4))
do_plot_CDF(tor_util_x, tor_props)
#do_plot_CDF(zero_util_btrelay_x, zero_props)
do_plot_CDF(twenty_util_btrelay_x, twenty_props)
do_plot_CDF(fifty_util_btrelay_x, fifty_props)
do_plot_CDF(eighty_util_btrelay_x, eighty_props)
ylim(0, 1)
ylabel("Cumulative Fraction")
xlabel("File Sharing Relay Bandwidth Utilization (%)")
do_legend()
if options.save_figs: savefig('btrelay_util.png')
#util normal
figure(figsize=(6, 4))
do_plot_CDF(tor_util_x, tor_props)
do_plot_CDF(zero_util_btclient_x, zero_props)
do_plot_CDF(twenty_util_btclient_x, twenty_props)
do_plot_CDF(fifty_util_btclient_x, fifty_props)
do_plot_CDF(eighty_util_btclient_x, eighty_props)
ylim(0, 1)
ylabel("Cumulative Fraction")
xlabel("File Sharing Client Bandwidth Utilization (%)")
do_legend()
if options.save_figs: savefig('btclient_util.png')
mark_every = util_markevery_one
figure(figsize=(4.5, 3))
do_plot_CDF(tor_util_x, tor_props)
do_plot_CDF(twenty_util_btrelay_x, get_btrelay_props(twenty_props))
do_plot_CDF(twenty_util_btclient_x, get_btclient_props(twenty_props))
ylim(0, 1)
ylabel("Cumulative Fraction")
xlabel("File Sharing Bandwidth Utilization (%)")
do_legend()
if options.save_figs: savefig('20_util.png')
figure(figsize=(4.5, 3))
do_plot_CDF(tor_util_x, tor_props)
do_plot_CDF(fifty_util_btrelay_x, get_btrelay_props(fifty_props))
do_plot_CDF(fifty_util_btclient_x, get_btclient_props(fifty_props))
ylim(0, 1)
ylabel("Cumulative Fraction")
xlabel("File Sharing Bandwidth Utilization (%)")
do_legend()
if options.save_figs: savefig('50_util.png')
figure(figsize=(4.5, 3))
do_plot_CDF(tor_util_x, tor_props)
do_plot_CDF(eighty_util_btrelay_x, get_btrelay_props(eighty_props))
do_plot_CDF(eighty_util_btclient_x, get_btclient_props(eighty_props))
ylim(0, 1)
ylabel("Cumulative Fraction")
xlabel("File Sharing Bandwidth Utilization (%)")
do_legend()
if options.save_figs: savefig('80_util.png')
# messages, datagrams, and memory over time
figure(figsize=(6, 4))
do_plot(time, tor_memory, tor_props)
do_plot(time, fifty_memory, fifty_props)
ylabel("Memory (MB)")
xlabel("Time (m)")
do_legend()
if options.save_figs: savefig('memory.png')
figure(figsize=(6, 4))
plot(time, tor_mgs_total, lw=line_width, label=tor_props[0])
plot(time, fifty_mgs_total_y, lw=line_width, label=fifty_props[0] + " total")
plot(time, fifty_mgs_ll_y, lw=line_width, label=fifty_props[0] + " LL")
plot(time, fifty_mgs_ht_y, lw=line_width, label=fifty_props[0] + " HT")
plot(time, fifty_mgs_n_y, lw=line_width, label=fifty_props[0] + " N")
ylabel("Number of Messages")
xlabel("Time (m)")
do_legend()
if options.save_figs: savefig('messages.png')
figure(figsize=(6, 4))
plot(time, tor_dgs_total, lw=line_width, label=tor_props[0])
plot(time, fifty_dgs_total_y, lw=line_width, label=fifty_props[0] + " total")
plot(time, fifty_dgs_ll_y, lw=line_width, label=fifty_props[0] + " LL")
plot(time, fifty_dgs_ht_y, lw=line_width, label=fifty_props[0] + " HT")
plot(time, fifty_dgs_n_y, lw=line_width, label=fifty_props[0] + " N")
ylabel("Number of Datagrams")
xlabel("Time (m)")
do_legend()
if options.save_figs: savefig('datagrams.png')
if options.view_figs: show()
def to_float(list):
new = []
for item in list:
new.append(float(item))
return new
# this is for graphs that show both btrelay and btclient data with the same percentage of converters
def get_btrelay_props(props):
new = []
new.append(props[0] + ", FSR")
new.append('--')
new.append('b')
new.append('x')
return new
# this is for graphs that show both btrelay and btclient data with the same percentage of converters
def get_btclient_props(props):
new = []
new.append(props[0] + ", FSC")
new.append(':')
new.append('r')
new.append('+')
return new
def do_plot_CDF(x, props):
do_plot(x, get_y_cdf_axis(x), props)
def do_plot(x, y, props):
ew = 1.0
if props[3] == '+' or props[3] == 'x':
ew = 1.5
plot(x, y, markevery=mark_every, ms=marker_size, mew=ew, lw=line_width, label=props[0], ls=props[1], c=props[2], marker=props[3])
def get_y_cdf_axis(x):
y = []
frac = 0
for i in xrange(len(x)):
frac += 1.0 / float(len(x))
y.append(frac)
return y
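# A tiny illustration (same Python 2 style as the rest of this script): each
# sample contributes an equal 1/N step, so four samples map to the cumulative
# fractions [0.25, 0.5, 0.75, 1.0].
def _demo_cdf_axis():
    return get_y_cdf_axis([1, 2, 3, 4])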
def do_legend():
# see http://matplotlib.sourceforge.net/users/legend_guide.html
leg = legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0., numpoints=1, handletextpad=0.2)
if leg is not None:
for t in leg.get_texts():
t.set_fontsize('small')
do_grid()
def do_legend2():
# see http://matplotlib.sourceforge.net/users/legend_guide.html
leg = legend(loc='lower right', ncol=1, borderaxespad=0.5, numpoints=1, handletextpad=0.2)
if leg is not None:
for t in leg.get_texts():
t.set_fontsize('small')
do_grid()
def do_grid():
from matplotlib.pylab import gca
gca().yaxis.grid(True, c='0.5')
gca().xaxis.grid(True, c='0.5')
def main():
plots1 = []
plots2 = []
plots3 = []
plots4 = []
plots5 = []
plots6 = []
yl1 = 'Cumulative fraction'
yl2 = 'Java VM Memory (MB)'
yl3 = 'Total Datagrams'
yl4 = 'Total Messages'
xl1 = 'Average BT goodput (kbps)'
xl2 = 'BT utilization (%)'
xl3 = 'Average WEB rtt (s)'
xl4 = 'Time (m)'
ps = [("LowLatencyData", "LL"), ("HighThroughputData", "HT"), ("NormalData", "N")]
filename = options.filename
label = "test"
read_file(filename)
for p in ps:
x = get_web_rtt_CDF(p[0])
if(len(x) > 0):
plots1.append((x, get_y_cdf_axis(x), label + " " + p[1]))
# for p in ps:
# x, xutil, y = get_bt_rtt_CDF(p[0])
# if(len(x) > 0):
# plots2.append((x, y, label + " " + p[1]))
# plots3.append((xutil, y, label + " " + p[1]))
x_norm, xutil_norm, x_conv, xutil_conv = get_bt_rtt_CDF_weighted()
if len(x_norm) > 0:
plots2.append((x_norm, get_y_cdf_axis(x_norm), label + " N"))
plots3.append((xutil_norm, get_y_cdf_axis(xutil_norm), label + " N"))
if len(x_conv) > 0:
plots2.append((x_conv, get_y_cdf_axis(x_conv), label + " C"))
plots3.append((xutil_conv, get_y_cdf_axis(xutil_conv), label + " C"))
x, y = get_memory_over_time()
plots4.append((x, y, label))
x, y1, y2, y3, y4 = get_datagrams_over_time()
plots5.append((x, y1, label + " total"))
plots5.append((x, y2, label + " HT"))
plots5.append((x, y3, label + " LL"))
plots5.append((x, y4, label + " N"))
x, y1, y2, y3, y4 = get_messages_over_time()
plots6.append((x, y1, label + " total"))
plots6.append((x, y2, label + " HT"))
plots6.append((x, y3, label + " LL"))
plots6.append((x, y4, label + " N"))
draw(plots1, xl3, yl1, True, "rtt")
draw(plots2, xl1, yl1, True, "goodput")
draw(plots3, xl2, yl1, True, "util")
draw(plots4, xl4, yl2, False, "memory")
draw(plots5, xl4, yl3, False, "datagrams")
draw(plots6, xl4, yl4, False, "messages")
if options.view_figs: show()
def reset():
global bt_rtt_d, web_rtt_d, delay_d, heartbeat_l
del(bt_rtt_d)
bt_rtt_d = {}
del(web_rtt_d)
web_rtt_d = {}
del(delay_d)
delay_d = {}
del(heartbeat_l)
heartbeat_l = []
def read_file(filename):
reset()
print "Reading", filename
if(filename.find('.gz') > -1):
file = gzip.open(filename)
else:
file = open(filename)
for line in file:
if line.startswith('#'):
pass
elif line.find('@') < 0:
pass
else:
time = int((line.split(' '))[7])
if time < warmup:
pass
elif line.find("Heartbeat") > -1:
read_heartbeat(line)
elif line.find("FileSharer@") > -1 and line.find("rtt measurement:") > -1:
read_bt_rtt(line, time)
elif line.find("WebBrowser@") > -1 and line.find("rtt measurement:") > -1:
read_web_rtt(line, time)
print "Parsing", filename
def read_bt_rtt(line, time):
parts = line.split(' ')
t = parts[9]
id = t[t.rfind('@') + 1:t.rfind('[')]
bw = t[t.rfind('/') + 1:t.rfind('k')]
app = parts[9][:parts[9].rfind('@')]
priority = parts[10]
bytes = int(parts[13]) + int(parts[17])
rtt = parts[21]
if(id not in bt_rtt_d):
bt_rtt_d[id] = []
bt_rtt_d[id].append((int(time), int(bytes), float(bw), int(rtt), priority, app))
def read_web_rtt(line, time):
parts = line.split(' ')
id = parts[9]
id = id[id.rfind('@') + 1:id.rfind('[')]
priority = parts[10]
rtt = parts[13]
if(id not in web_rtt_d):
web_rtt_d[id] = []
web_rtt_d[id].append((int(time), int(rtt), priority))
def read_heartbeat(line):
global current_minute
parts = line.split(' ')
time = parts[10]
time = int(time[:time.find('/')])
mem = parts[12]
mem = int(mem[:mem.find('M')])
m = int(parts[13])
mht = int(parts[15])
mll = int(parts[17])
mn = int(parts[19])
d = int(parts[21])
dht = int(parts[23])
dll = int(parts[25])
dn = int(parts[27])
heartbeat_l.append((time, mem, m, mht, mll, mn, d, dht, dll, dn))
current_minute = time
def get_web_rtt_CDF(priority):
x = []
frac = 0
for key in web_rtt_d:
tuplelist = web_rtt_d[key]
total = 0
count = 0
for entry in tuplelist:
if entry[2] == priority:
total += entry[1]
count += 1
if count > 0:
x.append((total / count) / 1000.0)
x.sort()
return x
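# Note (assumption): the per-node mean rtt is divided by 1000.0 above, which
# suggests the logged rtt values are in milliseconds and the resulting CDF
# x-axis is in seconds, consistent with the 'Average WEB rtt (s)' label in main().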
def get_bt_rtt_CDF(priority):
x = []
xutil = []
y = []
frac = 0
for key in bt_rtt_d:
tuplelist = bt_rtt_d[key]
total = 0
start = 0
end = 0
bw = 0
for entry in tuplelist:
if entry[4] == priority:
if start == 0:
start = entry[0] - entry[3]
bw = float(2.0 * entry[2])
end = entry[0]
total += entry[1]
if bw > 0:
kbps = float(total * 8.0 / 1000.0) / ((end - start) / 1000000000.0)
x.append(kbps)
xutil.append((kbps / bw) * 100.0)
for i in range(len(x)):
frac += 1.0 / float(len(x))
y.append(frac)
x.sort()
xutil.sort()
return x, xutil, y
def get_bt_rtt_CDF_weighted():
x_norm = []
xutil_norm = []
x_conv = []
xutil_conv = []
for key in bt_rtt_d:
tuplelist = bt_rtt_d[key]
if tuplelist[0][5].find('FileSharer@Client') > -1:
kbps, kbps_percent = get_bt_helper(tuplelist)
if kbps > -1:
x_norm.append(kbps)
xutil_norm.append(kbps_percent)
elif tuplelist[0][5].find('FileSharer@Relay') > -1:
kbps, kbps_percent = get_bt_helper(tuplelist)
if kbps > -1:
x_conv.append(kbps)
xutil_conv.append(kbps_percent)
x_norm.sort()
xutil_norm.sort()
x_conv.sort()
xutil_conv.sort()
return x_norm, xutil_norm, x_conv, xutil_conv
def get_bt_helper(tuplelist):
bw = 0
start = 0
end = 0
total = 0
num = 0
for entry in tuplelist:
if start == 0:
start = entry[0] - entry[3]
bw = float(2.0 * entry[2])
end = entry[0]
total += entry[1]
num += 1
kbps = -1
kbps_percent = -1
if bw > 0:
if num > 0:
kbps = float(total * 8.0 / 1000.0) / ((end - start) / 1000000000.0)
kbps_percent = (kbps / bw) * 100.0
return kbps, kbps_percent
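# Unit sketch (assumptions, not stated in the source): total is a byte count,
# so total * 8 / 1000 gives kilobits; the timestamps appear to be nanoseconds,
# so (end - start) / 1e9 gives seconds and kbps is kilobits per second.
# bw = 2.0 * entry[2] presumably combines both directions of the advertised
# per-node rate, making kbps_percent the utilization of that combined capacity.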
def get_memory_over_time():
x = []
y = []
for item in heartbeat_l:
x.append(item[0])
y.append(item[1])
return x, y
def get_messages_over_time():
x = []
y1 = []
y2 = []
y3 = []
y4 = []
for item in heartbeat_l:
x.append(item[0])
y1.append(item[2])#total
y2.append(item[3])#ht
y3.append(item[4])#ll
y4.append(item[5])#n
return x, y1, y2, y3, y4
def get_datagrams_over_time():
x = []
y1 = []
y2 = []
y3 = []
y4 = []
for item in heartbeat_l:
x.append(item[0])
y1.append(item[6])#total
y2.append(item[7])#ht
y3.append(item[8])#ll
y4.append(item[9])#n
return x, y1, y2, y3, y4
def draw(plots, xaxislabel, yaxislabel, isCDF, savename):
figure(figsize=(6, 4))
for p in plots:
plot(p[0], p[1], linewidth=3.0, label=p[2])
if isCDF:
ylim(0, 1)
ylabel(yaxislabel)
xlabel(xaxislabel)
do_legend()
if options.save_figs: savefig(savename)
def draw_relay_bandwidth_CDF():
file = open("../src/main/resources/relay_bandwidth.dat")
x = []
y = []
bytes_to_kbits = 8.0 / 1000.0
for line in file:
if line.startswith('#'):
pass
else:
l = line.strip().split(' ')
x.append(float(l[0]) * bytes_to_kbits)
y.append(float(l[1]))
figure(figsize=(6, 4))
plot(x, y, linewidth=3.0)
ylabel('Cumulative fraction (%)')
xlabel('Relay bandwidth (kbps)')
leg = legend(loc='lower right', ncol=1, columnspacing=0.03, handletextpad=0.01)
#for t in leg.get_texts():
# t.set_fontsize('small')
if __name__ == '__main__':
#draw_relay_bandwidth_CDF()
if options.gzfilenames is not None:
print "reading from gzs..."
parse_to_csv()
if options.csvfilename is not None:
print "graphing csv..."
graph_from_csv()
if options.filename is not None:
print "reading single..."
main()
print "done!"
|
gpl-3.0
|
roxyboy/scikit-learn
|
sklearn/linear_model/tests/test_coordinate_descent.py
|
114
|
25281
|
# Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet, note that glmnet divides the loss
# by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
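# Sanity check (not part of the original test): for this toy data the lasso
# solution reduces to soft-thresholding, w = max(0, (X'y - n*alpha) / X'X),
# with X'y = 2, X'X = 2 and n = 3, giving 0.85, 0.25 and 0 for alpha = 0.1,
# 0.5 and 1, which matches the coefficients asserted above.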
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
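# Example use (illustrative): with the defaults, only the first 10 of 200
# features carry signal, e.g.
#   X, y, X_test, y_test = build_dataset()
#   assert X.shape == (50, 200) and y.shape == (50,)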
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall further than one grid
# position apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path (new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
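# Shape note (reading of the assertions above, not from the source): with two
# l1_ratio values, 50 alphas and the default 3-fold CV in this version,
# mse_path_ is laid out as (n_l1_ratio, n_alphas, n_folds) for
# MultiTaskElasticNetCV and (n_alphas, n_folds) for MultiTaskLassoCV.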
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" issues a DeprecationWarning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(y, order='F', dtype='float64', ensure_2d=False)
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overridden_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
|
bsd-3-clause
|
J535D165/recordlinkage
|
tests/test_datasets.py
|
1
|
4646
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import environ
from pathlib import Path
import numpy
import pandas
import pytest
from recordlinkage.datasets import (load_febrl1, load_febrl2, load_febrl3,
load_febrl4, load_krebsregister,
binary_vectors)
from recordlinkage.datasets import get_data_home, clear_data_home
FEBRL_DEDUP = [
# nlinks = 500
(load_febrl1, 1000, 500),
# nlinks=19*6*5/2+47*5*4/2+107*4*3/2+141*3*2/2+114
(load_febrl2, 5000, 1934),
# nlinks=168*6*5/2+161*5*4/2+212*4*3/2+256*3*2/2+368
(load_febrl3, 5000, 6538)
]
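# The nlinks formulas above count pairwise links within duplicate groups: a
# group of k duplicates of one record contributes k*(k-1)/2 pairs, e.g. for
# load_febrl2: 19*15 + 47*10 + 107*6 + 141*3 + 114 = 1934.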
@pytest.mark.parametrize("dataset,nrows,nlinks", FEBRL_DEDUP)
def test_febrl_dedup(dataset, nrows, nlinks):
df = dataset()
assert isinstance(df, pandas.DataFrame)
assert len(df) == nrows
@pytest.mark.parametrize("dataset,nrows,nlinks", FEBRL_DEDUP)
def test_febrl_dedup_links(dataset, nrows, nlinks):
df, links = dataset(return_links=True)
assert isinstance(df, pandas.DataFrame)
assert len(df) == nrows
assert len(links) == nlinks
assert isinstance(links, pandas.MultiIndex)
@pytest.mark.parametrize("dataset,nrows,nlinks", FEBRL_DEDUP)
def test_febrl_dedup_tril(dataset, nrows, nlinks):
df, links = dataset(return_links=True)
s_level_1 = pandas.Series(numpy.arange(len(df)), index=df.index)
s_level_2 = pandas.Series(numpy.arange(len(df)), index=df.index)
x1 = s_level_1.loc[links.get_level_values(0)]
x2 = s_level_2.loc[links.get_level_values(1)]
assert numpy.all(x1.values > x2.values)
def test_febrl4():
dfa, dfb = load_febrl4()
assert isinstance(dfa, pandas.DataFrame)
assert isinstance(dfb, pandas.DataFrame)
assert len(dfa) == 5000
assert len(dfb) == 5000
def test_febrl_links():
dfa, dfb, links = load_febrl4(return_links=True)
assert isinstance(dfa, pandas.DataFrame)
assert isinstance(dfb, pandas.DataFrame)
assert len(dfa) == 5000
assert len(dfb) == 5000
assert isinstance(links, pandas.MultiIndex)
def test_krebs_dataset_download():
# remove downloaded datasets
clear_data_home()
krebs_data, krebs_matches = load_krebsregister()
for i in range(1, 11):
assert Path(get_data_home(), "krebsregister",
"block_{}.zip".format(i)).is_file()
# count the number of records
assert isinstance(krebs_data, pandas.DataFrame)
assert isinstance(krebs_matches, pandas.MultiIndex)
assert len(krebs_data) == 5749132
assert len(krebs_matches) == 20931
def test_krebs_dataset_environ(tmpdir):
path = Path(str(tmpdir)).expanduser()
environ['RL_DATA'] = str(path)
krebs_data, krebs_matches = load_krebsregister()
for i in range(1, 11):
assert Path(path, "krebsregister", "block_{}.zip".format(i)).is_file()
def test_krebs_dataset():
krebs_data_block1, krebs_matches_block1 = load_krebsregister(1)
krebs_data_block10, krebs_matches_block10 = load_krebsregister(10)
assert len(krebs_data_block1) > 0
assert len(krebs_data_block10) > 0
# load not existing block
with pytest.raises(ValueError):
load_krebsregister(11)
# missing values
krebs_block10, matches = load_krebsregister(10, missing_values=0)
assert krebs_block10.isnull().sum().sum() == 0
def test_krebs_missings():
# missing values
krebs_block10, matches = load_krebsregister(10, missing_values=0)
assert krebs_block10.isnull().sum().sum() == 0
def test_krebs_shuffle():
# load block 10 without shuffling
krebs_block10, matches = load_krebsregister(10, shuffle=False)
def test_random_comparison_vectors():
# Test the generation of a random dataset
n_record_pairs = 10000
n_matches = 500
df = binary_vectors(
n_record_pairs, n_matches, m=[0.8] * 8, u=[0.2] * 8, random_state=535)
# Check the result is a DataFrame with MultiIndex
assert isinstance(df, pandas.DataFrame)
assert isinstance(df.index, pandas.MultiIndex)
# Test the length of the dataframe
assert len(df) == n_record_pairs
def test_random_comparison_vectors_1value_col():
m = numpy.array([1, .81, .85, 0])
u = numpy.array([1, .23, .50, 0])
# Create the train dataset.
X_train, y_train = binary_vectors(
1000, 500, m=m, u=u, random_state=535, return_links=True)
assert len(X_train.iloc[:, 0].unique()) == 1
assert X_train.iloc[:, 0].unique()[0] == 1
assert len(X_train.iloc[:, 3].unique()) == 1
assert X_train.iloc[:, 3].unique()[0] == 0
assert len(X_train.iloc[:, 1].unique()) == 2
assert len(X_train.iloc[:, 2].unique()) == 2
|
bsd-3-clause
|
erscott/RASLseqTools
|
RASLseqTools/RASLseqAnalysis_STAR.py
|
1
|
20131
|
import pandas as pd
import os,sys
import numpy as np
import argparse
import subprocess
import multiprocessing as mp
import sys,os
import time
import inspect
source_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
source_dir = '/'.join(source_dir.split('/')[:-1])
sys.path.append(source_dir)
from RASLseqTools import *
class RASLseqAnalysis_STAR(object):
'''
This class creates a pandas DataFrame for RASLseq fastq sequences.
Attributes of this class annotate RASLseq fastq sequences.
The get_attributes method allows the user to specify what additional
information to add to the pandas DataFrame.
Attributes
----------
fastq_file: str
path to Fastq file
probes_path: str
path to Probe file
aligner_dir: str
path to directory holding the blastn or STAR executable
well_annot: str
path to Barcode Annotations file
write_path: str
path to write directory
print_on: boolean, default=False
Whether to print information during data processing
offset_5p: int, optional, default=24
Index position for RASLprobe start,
Number of bases to clip from 5' end of read
to isolate probe sequence
offset_3p: int, optional, default=22
Number of bases from 3' end of read to clip
in order to isolate probe sequence in read
wellbc_start: int, default=0
Expected wellbarcode start position
wellbc_end: int, default=8
Expected wellbarcode end position
Returns
-----------
read_df: pandas DataFrame
index: (PlateBarcode, sequence)
columns: ['PlateBarcode', 'seq', 'seq_count']
PlateBarcode - Index read from fastq file
seq - fastq read sequence
seq_count - number of occurrences of seq in fastq
'''
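# Illustrative read layout implied by the defaults above (an interpretation,
# not taken verbatim from the source):
#   read[0:8]    -> well barcode (wellbc_start:wellbc_end)
#   read[24:-22] -> probe region after clipping offset_5p / offset_3p bases
#                   (the actual clipping is performed inside RASLseqAlign)
# Hypothetical invocation (paths are placeholders):
#   analysis = RASLseqAnalysis_STAR('reads.fq.gz', 'probes.txt', '/path/to/STAR/',
#                                   'well_annot.tsv', '/tmp/out/', '/tmp/out/counts.txt')
#   analysis.get_target_counts_df()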
def __init__(self, fastq_path, probes_path, aligner_dir, well_annot, \
write_path, write_file, print_on=False, n_jobs=1, offset_5p=24, \
offset_3p=22, wellbc_start=0, wellbc_end=8, write_alignments=False ):
self.RASLseqProbes_obj = RASLseqProbes.RASLseqProbes(probes_path, write_path, aligner_dir, aligner='star')
self.RASLseqBCannot_obj = RASLseqBCannot.RASLseqBCannot(well_annot)
self.RASLseqBCannot_obj.well_bc = self.RASLseqBCannot_obj.well_bc
self.aligner = 'star'
self.aligner_path = aligner_dir
self.fastq_path = fastq_path
self.n_jobs = n_jobs
self.offset_5p = int(offset_5p)
self.offset_3p = int(offset_3p)
self.wellbc_start = int(wellbc_start)
self.wellbc_end = int(wellbc_end)
self.print_on = print_on
self.write_alignments = write_alignments
self.write_path = write_path
self.write_file = write_file
self.bc_edit_dist_filter = 2
def _get_probe_well_read_counts(self, collapsed_read_counts):
'''
This function aggregates probe-specific read counts for each plate and well
Parameters
----------
collapsed_read_counts: pandas dataframe
must possess cols: ['PlateBarcode', 'WellBarcode', 'ProbeName']
Returns
-------
Pandas Dataframe
index: ['PlateBarcode', 'WellBarcode']
columns: probe-specific read count sums (counts summed across reads
mapped to each probe)
'''
#grouping reads by plate barcode, well barcode, and probe
collapsed_read_counts['count'] = 1
collapsed_probe_counts = collapsed_read_counts.groupby(['PlateBarcode','WellBarcode','ProbeName'])['count'].aggregate(np.sum)
#aggregating probe counts for each well
counts_df = collapsed_probe_counts.unstack('ProbeName') #creating matrix of aggregated probe counts indexed on 'PlateBarcode', 'WellBarcode'
#identification and removal of off-target ligation hits, marked by a '!' character in the blast sseq name
on_target_col = [i for i in counts_df.columns if "!" not in i]
counts_df = counts_df[on_target_col] #removing off-target ligation hits from df
counts_df.index.names= ['PlateBarcode', 'WellBarcode']
return counts_df
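# Shape sketch (illustrative values): an input frame with rows
#   PlateBarcode  WellBarcode  ProbeName
#   P1            W1           probeA
#   P1            W1           probeA
#   P1            W2           probeB
# is aggregated to a (PlateBarcode, WellBarcode)-indexed matrix with one column
# per on-target probe, so counts_df.loc[('P1', 'W1'), 'probeA'] == 2.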
def _merge_plate_well_annot(self, probe_counts_df, well_annot_df):
'''
This function merges gene_counts_df with well annotations
Parameters
----------
probe_counts_df: Pandas DataFrame
Requires pandas index: ('PlateBarcode','WellBarcode')
well_annot_path: Pandas DataFrame
Requires pandas index: ('PlateBarcode','WellBarcode')
Returns
-------
Pandas DataFrame
well_annot_df right joined to gene_counts_df
index: ('PlateBarcode', 'WellBarcode')
'''
return well_annot_df.join(probe_counts_df,how='right')
def get_demultiplexed_id(self):
'''
This function maps wellbarcodes and platebarcodes
'''
#parsing barcodes from fastq data
self.read_df['wellbarcode'] = self.read_df['seq'].str[self.wellbc_start:self.wellbc_end] #parsing wellbc from read seq
self.read_df['platebarcode'] = self.read_df['id_line'].str.split(' ').str[-1].str.split(':').str[-1].str.rstrip('N') #parsing platebc from id_line
self.read_df = self.read_df[['id_line', 'wellbarcode', 'platebarcode']] #removing unnecessary data
#demultiplexing platebarcode
imperfect_platebcs = self.read_df[~self.read_df['platebarcode'].isin(self.RASLseqBCannot_obj.plate_bc)][['platebarcode']].drop_duplicates()
imperfect_platebcs['PlateBarcode'] = imperfect_platebcs['platebarcode'].apply(RASLseqWellbc.map_bc, args=[self.RASLseqBCannot_obj.plate_bc])
perfect_platebcs = pd.DataFrame([[i,i] for i in self.RASLseqBCannot_obj.plate_bc], columns=['PlateBarcode', 'platebarcode'])
imperfect_platebcs = imperfect_platebcs.append(perfect_platebcs)
#demultiplexing wellbarcode
imperfect_wellbcs = self.read_df[~self.read_df['wellbarcode'].isin(self.RASLseqBCannot_obj.well_bc)][['wellbarcode']].drop_duplicates()
imperfect_wellbcs['WellBarcode'] = imperfect_wellbcs['wellbarcode'].apply(RASLseqWellbc.map_bc, args=[self.RASLseqBCannot_obj.well_bc])
perfect_wellbcs = pd.DataFrame([[i,i] for i in self.RASLseqBCannot_obj.well_bc], columns=['WellBarcode', 'wellbarcode'])
imperfect_wellbcs = imperfect_wellbcs.append(perfect_wellbcs)
#combining demultiplexed platebarcodes with read data
self.read_df = self.read_df.merge(imperfect_platebcs, on='platebarcode', how='left')
del self.read_df['platebarcode']
#combining demultiplexed wellbarcodes with read data
self.read_df = self.read_df.merge(imperfect_wellbcs, on='wellbarcode', how='left')
del self.read_df['wellbarcode']
return True
def get_target_counts_df(self):
'''
This function demultiplexes and aligns RASL-seq fastq reads
Expects the PlateBarcode in the read id-line
PlateBarcode as the last element in the read id-line, delimitted by " "
Expects the following read structure:
WellBarcode - Adaptor - RASL probe - RASL probe - Adaptor
Processes 250000 reads per chunk
'''
if '.gz' in self.fastq_path:
self.fastq_df_chunk = pd.read_table(self.fastq_path, sep='\t', chunksize=1000000, header=None, compression='gzip')
else:
self.fastq_df_chunk = pd.read_table(self.fastq_path, sep='\t', chunksize=1000000, header=None)
self.master_df = []
for n,read_df in enumerate(self.fastq_df_chunk):
#READ FASTQ CHUNK INTO DF
self.read_df = FastqCollapse.get_fastq_unstack(read_df)
#WRITE TO TEMP FILE
random_str = self.RASLseqProbes_obj.random_str
self.temp_fastq = self.write_path + 'temp_' + random_str + '.fastq'
FastqCollapse.write_fastq(self.read_df, self.temp_fastq)
#LAUNCH STAR ALIGNMENT PROCESS, STAR ALIGN STDOUT
# self.aligned_df = RASLseqAlign.get_rasl_aligned_df(self.temp_fastq, aligner_path.rstrip('/'), \
# self.RASLseqProbes_obj.probedb_path, print_on=self.print_on, aligner='star', \
# n_jobs=1, offset_5p=self.offset_5p, offset_3p=self.offset_3p)
#ASYNCHRONOUS STAR ALIGNMENT
#print time.gmtime(), 1
print
print 'Starting Asynchronous Alignment Process'
pool = mp.Pool(1)
task = (self.temp_fastq, self.aligner_path.rstrip('/'), self.RASLseqProbes_obj.probedb_path, \
self.print_on, 'star', 1, self.offset_5p, self.offset_3p)
self.aligned_df = pool.apply_async(RASLseqAlign.get_rasl_aligned_df, task)
#BARCODE DEMULTIPLEXING
print 'Demultiplexing Reads'
#print time.gmtime(), 2
self.get_demultiplexed_id()
#print time.gmtime(), 3
print 'Demultiplexing Complete'
#FORMATTING ID_LINE FOR MERGING
self.read_df['id_line'] = self.read_df['id_line'].str.split(' ').str[0].str.lstrip('@')
#print time.gmtime(), 4
pool.close()
pool.join()
self.aligned_df = self.aligned_df.get()
pool.terminate()
print 'Alignment Complete'
#print time.gmtime(), 5
if self.write_alignments:
print 'Writing STAR Alignments To Disk'
self.alignment_write_file = self.write_file + '.Aligned.out'
self.alignment_write_file_header = self.aligned_df.columns
self.aligned_df.to_csv(self.alignment_write_file, sep='\t', mode='a', header=False, index=False)
print 'Writing STAR Alignments To Disk Complete'
print
#MERGING BARCODES AND ALIGNED READS
self.read_df = self.read_df.merge(self.aligned_df, on='id_line', how='inner')
del self.aligned_df
#REMOVING MISMATCH WELLBARCODE AND PLATEBARCODE READS
self.read_df = self.read_df[(self.read_df.WellBarcode != 'mismatch') & (self.read_df.PlateBarcode != 'mismatch')]
#SUM PROBE-SPECIFIC READ COUNTS
self.read_df = self._get_probe_well_read_counts( self.read_df )
self.master_df.append(self.read_df)
del self.read_df
print (n+1)*250000, 'Reads Processed'
os.system('rm ' + self.temp_fastq)
#AGGREGATING THE PROBE COUNTS FOR EACH SAMPLE
self.RASLseqAnalysis_df = self.master_df[0]
for df in self.master_df[1:]:
self.RASLseqAnalysis_df = self.RASLseqAnalysis_df.add(df, fill_value=0)
del self.master_df
self.probe_columns = self.RASLseqAnalysis_df.columns
self.read_count_mapped = self.RASLseqAnalysis_df.sum().sum()
#MERGING WELL ANNOTATIONS AND PROBE READ COUNTS
self.annotation_columns = self.RASLseqBCannot_obj.well_annot_df.columns
self.RASLseqAnalysis_df = self._merge_plate_well_annot(self.RASLseqAnalysis_df, self.RASLseqBCannot_obj.well_annot_df)
#DELETING TEMP FILES
os.system('rm -r ' + self.RASLseqProbes_obj.probedb_path)
return
def load_target_counts_df(self, file_path):
'''
This function loads a RASLseqAnalysis_df from the file_path
Parameters
----------------
file_path: str
Specifies the path to the RASLseqAnalysis_df
Expects tab-separated sample by count dataframe with
PlateBarcode, WellBarcode columns
Returns
----------------
self.RASLseqAnalysis_df, pandas DataFrame
'''
compression = ''
if 'gz' in file_path:
compression='gzip'
df = pd.read_table(file_path, sep='\t', index_col=['PlateBarcode', 'WellBarcode'], compression=compression)
self.RASLseqAnalysis_df = df
self.probe_columns = list( set(self.RASLseqProbes_obj.probe_columns) & set(self.RASLseqAnalysis_df.columns) )
self.annot_columns = list( set(self.RASLseqBCannot_obj.annot_columns) & set(self.RASLseqAnalysis_df.columns) )
return
def count_df(df, annot_cols):
'''
This function extracts the probe count columns from a sample-by-count dataframe
Parameters
--------------
df: pandas dataframe
annot_cols: list of annotation columns to exclude from the counts
Returns
--------------
pandas df containing only the probe count columns, with missing values filled as 0
'''
def get_count_cols(df, annot_cols):
return list( set(df.columns) - set(annot_cols) )
aggregated_counts = df[get_count_cols(df, annot_cols)]
aggregated_counts.fillna(value=0, inplace=True)
return aggregated_counts
parser = argparse.ArgumentParser()
parser.add_argument('-f','--fastq', type=str,help='Specifies the input fastq file; can be a comma-delimited list of fastq files, e.g. /path/to/CG_data/RASLseq.fq')
parser.add_argument('-p','--probes', type=str,help='Specifies the input probes file containing the following columns: AcceptorProbeSequence DonorProbeSequence AcceptorAdaptorSequence DonorAdaptorSequence ProbeName, e.g. /path/to/probes.txt')
parser.add_argument('-a','--aligner_bin', type=str,help='Specifies the path to directory holding STAR executable, /path/to/aligner_dir/')
parser.add_argument('-w','--well_annot', type=str,help='Specifies the input well annotations file containing the following columns: PlateBarcode and WellBarcode, e.g. /path/to/well/annotations.txt')
parser.add_argument('-d','--output_dir', type=str,help='Specifies the output directory path, e.g. /path/to/output/')
parser.add_argument('-o','--output_file', type=str,help='Specifies the output file path, e.g. /path/to/output/STAR_reads.txt')
parser.add_argument('-P','--print_on', action='store_true', default=False,help='Specifies whether to print summary stats during alignment, default=False')
parser.add_argument('-A','--write_alignments', action='store_true', default=False, help='Specifies whether to write STAR alignments to disk, default=False')
parser.add_argument('-n','--n_jobs', type=int, default=1, help='Specifies the number of processors to use, default 1')
parser.add_argument('-o5','--offset_5p', type=int, default=24, help='Specifies the number of bases to clip from 5-prime end of read to isolate probe sequence, default 24')
parser.add_argument('-o3','--offset_3p', type=int, default=22, help='Specifies the number of bases to clip from 3-prime end of read to isolate probe sequence, default 22')
parser.add_argument('-ws','--wellbc_start', type=int, default=0, help='Specifies the index position of the wellbc start base, default 0')
parser.add_argument('-we','--wellbc_end', type=int, default=8, help='Specifies the index position of the wellbc end base, default 8')
opts = parser.parse_known_args()
fastq_path, probes_path, aligner_dir, well_annot, write_path = opts[0].fastq, opts[0].probes, opts[0].aligner_bin, opts[0].well_annot, opts[0].output_dir
n_jobs, offset_5p, offset_3p, wellbc_start, wellbc_end, write_file = opts[0].n_jobs, opts[0].offset_5p, opts[0].offset_3p, opts[0].wellbc_start, opts[0].wellbc_end, opts[0].output_file
print_on, write_alignments = opts[0].print_on, opts[0].write_alignments
if __name__ == '__main__':
if ',' in fastq_path: #Handles multiple fastq files in serial
fq_files = fastq_path.split(',')
rasl_analysis = RASLseqAnalysis_STAR(fq_files[0], probes_path, aligner_dir, well_annot, \
write_path, write_file, print_on=False,n_jobs=n_jobs, \
offset_5p=offset_5p, offset_3p=offset_3p, \
wellbc_start=wellbc_start, wellbc_end=wellbc_end, \
write_alignments=write_alignments)
rasl_analysis.get_target_counts_df()
annot_cols = list(rasl_analysis.RASLseqBCannot_obj.well_annot_df.columns)
master_df = count_df(rasl_analysis.RASLseqAnalysis_df.copy(), annot_cols)
for fastq in fq_files[1:]: #ITERATING THROUGH FASTQ FILES
rasl_analysis = RASLseqAnalysis_STAR(fastq, probes_path, aligner_dir, well_annot, write_path, \
write_file, print_on=False,n_jobs=n_jobs, offset_5p=offset_5p, \
offset_3p=offset_3p, wellbc_start=wellbc_start, \
wellbc_end=wellbc_end, write_alignments=write_alignments)
rasl_analysis.get_target_counts_df()
master_df = master_df.add( count_df(rasl_analysis.RASLseqAnalysis_df, annot_cols), fill_value=0 )
print
print 'Demultiplexing, Alignment, & Counting Complete:', fastq_path
master_df = rasl_analysis.RASLseqBCannot_obj.well_annot_df.join(master_df)
master_df.to_csv(write_path + 'Aggregated_counts_STAR_alignment.txt', sep='\t')
print 'All Files Complete:'
print fastq_path
os.system('gzip ' + rasl_analysis.alignment_write_file) #gzip STAR alignment file
else: #Handles single fastq file
rasl_analysis = RASLseqAnalysis_STAR(fastq_path, probes_path, aligner_dir, well_annot, write_path, \
write_file, print_on=False,n_jobs=n_jobs, offset_5p=offset_5p, \
offset_3p=offset_3p, wellbc_start=wellbc_start, \
wellbc_end=wellbc_end, write_alignments=write_alignments)
rasl_analysis.get_target_counts_df()
rasl_analysis.RASLseqAnalysis_df.to_csv(rasl_analysis.write_file, sep='\t')
os.system('gzip ' + rasl_analysis.write_file) #gzip STAR alignment file
print
print 'Demultiplexing, Alignment, & Counting Complete:', fastq_path
"""
TEST
python RASLseqAnalysis_STAR.py -f ~/Dropbox/RASLseq/Bcell_exp2/ipynb/data/truth_sets/SeqRun1/lane1_Undetermined_L001_R1_001_truth_set_reads.fastq.gz -p ~/Dropbox/RASLseq/Bcell_exp2/ipynb/data/on_target_probes_Bcell_2014.probes -a ~/Dropbox/RASLseq/Bcell_exp2/STAR_bin/ -w ~/Dropbox/RASLseq/Bcell_exp2/ipynb/data/20131203_Rasl-Seq_bioactive_cmp-Table1.tsv -d ~/Dropbox/RASLseq/Bcell_exp2/ipynb/data/temp/ -o ~/Dropbox/RASLseq/Bcell_exp2/ipynb/data/temp/STAR_testing.txt -n 1 -o5 24 -o3 22 -ws 0 -we 8
"""
|
mit
|
evgchz/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
40
|
12814
|
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = 2
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # stop_probability edge cases (0 and 1), min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
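# The hand-calculated values above follow the standard RANSAC trial-count
# relation N = ceil(log(1 - p) / log(1 - w ** m)), with p the stop probability,
# w the inlier ratio and m = min_samples. A minimal sketch of that relation
# (an illustrative helper only, assuming this is what _dynamic_max_trials
# encodes; it is not used by the tests):
def _approx_max_trials_sketch(n_inliers, n_samples, min_samples, probability):
    w = n_inliers / float(n_samples)
    if probability == 1:
        return float('inf')  # sampling can never reach certainty 1
    if w == 1:
        return 1  # every sample is an inlier, a single trial suffices
    return int(np.ceil(np.log(1 - probability) / np.log(1 - w ** min_samples)))
# e.g. _approx_max_trials_sketch(70, 100, 2, 0.99) == 7, matching the
# assertion for e = 30%, min_samples = 2 above.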
if __name__ == "__main__":
np.testing.run_module_suite()
|
bsd-3-clause
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/numpy/core/function_base.py
|
41
|
6518
|
from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
|
apache-2.0
|
idekerlab/py2cytoscape
|
tests/test_style.py
|
2
|
6267
|
# -*- coding: utf-8 -*-
import unittest
from py2cytoscape.data.style import Style
from py2cytoscape.data.cyrest_client import CyRestClient
import pandas as pd
import json
STYLE_NAME = 'style1'
class StyleTests(unittest.TestCase):
def setUp(self):
self.client = CyRestClient()
self.client.style.delete_all()
styles = self.client.style.get_all()
self.assertEqual(1, len(styles))
self.style = self.client.style.create(STYLE_NAME)
self.assertEqual(STYLE_NAME, self.style.get_name())
def test_get_style(self):
style_as_cy3_json = self.client.style.get(STYLE_NAME)
style_as_cyjs_json = self.client.style.get(STYLE_NAME,
data_format='cytoscapejs')
print(style_as_cy3_json)
print(json.dumps(style_as_cyjs_json,indent=4))
def test_mappings(self):
print('\n---------- Mappings tests start -----------\n')
mappings = self.style.get_mappings()
self.assertIsNotNone(mappings)
self.assertEqual(list, type(mappings))
self.assertEqual(0, len(mappings))
print('\n---------- Mapping tests finished! -----------\n')
def test_defaults(self):
print('\n---------- Defaults tests start -----------\n')
default_style = self.client.style.create('default')
defs = default_style.get_defaults()
print(defs)
self.assertIsNotNone(defs)
self.assertEqual(pd.Series, type(defs))
self.assertEqual(106, len(defs))
defs2 = self.style.get_defaults()
print(defs2)
self.assertIsNotNone(defs2)
self.assertEqual(pd.Series, type(defs2))
self.assertEqual(106, len(defs2))
print('\n---------- Defaults tests finished! -----------\n')
def test_get_defaults(self):
print('\n---------- GET Defaults tests start -----------\n')
default_style = self.client.style.create('def2')
n_label = default_style.get_default('NODE_LABEL')
n_size = default_style.get_default('NODE_SIZE')
n_shape = default_style.get_default('NODE_SHAPE')
e_width = default_style.get_default('EDGE_WIDTH')
print(n_label)
        # These should be the same as the VP (visual property) defaults.
self.assertEqual(pd.Series, type(n_label))
self.assertEqual('', n_label.ix['NODE_LABEL'])
self.assertEqual(50, n_size.ix['NODE_SIZE'])
self.assertEqual('ELLIPSE', n_shape.ix['NODE_SHAPE'])
self.assertEqual(1, e_width.ix['EDGE_WIDTH'])
def test_update_defaults(self):
print('\n---------- Add Defaults tests start -----------\n')
default_style = self.client.style.create('default')
new_defaults = {
# Node defaults
'NODE_FILL_COLOR': '#eeeeff',
'NODE_SIZE': 20,
'NODE_BORDER_WIDTH': 0,
'NODE_TRANSPARENCY': 120,
'NODE_LABEL_COLOR': 'white',
# Edge defaults
'EDGE_WIDTH': 9,
'EDGE_STROKE_UNSELECTED_PAINT': '#aaaaaa',
'EDGE_LINE_TYPE': 'LONG_DASH',
'EDGE_TRANSPARENCY': 120,
# Network defaults
'NETWORK_BACKGROUND_PAINT': 'black'
}
default_style.update_defaults(new_defaults)
defs = default_style.get_defaults()
self.assertEqual(20, defs.ix['NODE_SIZE'])
self.assertEqual('#eeeeff'.upper(), defs.ix['NODE_FILL_COLOR'])
self.assertEqual(0, defs.ix['NODE_BORDER_WIDTH'])
self.assertEqual(120, defs.ix['NODE_TRANSPARENCY'])
self.assertEqual('#FFFFFF', defs.ix['NODE_LABEL_COLOR'])
self.assertEqual(9, defs.ix['EDGE_WIDTH'])
self.assertEqual('#aaaaaa'.upper(), defs.ix['EDGE_STROKE_UNSELECTED_PAINT'])
self.assertEqual('LONG_DASH', defs.ix['EDGE_LINE_TYPE'])
self.assertEqual(120, defs.ix['EDGE_TRANSPARENCY'])
# Network defaults
self.assertEqual('#000000', defs.ix['NETWORK_BACKGROUND_PAINT'])
print('\n---------- Defaults tests finished! -----------\n')
def test_passthrough_mappings(self):
        print('---------- Passthrough mapping tests -----------\n')
self.style.create_passthrough_mapping(column='name',
col_type='String', vp='NODE_LABEL')
mappings = self.style.get_mappings()
self.assertEqual(1, len(mappings))
pt = self.style.get_mapping('NODE_LABEL')
print(pt)
self.assertEqual('passthrough', pt['mappingType'])
self.assertEqual('NODE_LABEL', pt['visualProperty'])
self.assertEqual('String', pt['mappingColumnType'])
self.assertEqual('name', pt['mappingColumn'])
self.style.delete_mapping(vp='NODE_LABEL')
mappings = self.style.get_mappings()
self.assertEqual(0, len(mappings))
print('---------- Passthrough mapping tests finished! -----------\n')
def test_discrete_mappings(self):
print('---------- Discrete mapping tests -----------\n')
vp = 'EDGE_LINE_TYPE'
col = 'interaction'
try:
self.style.create_discrete_mapping(column=col,
col_type='String',
vp=vp)
except ValueError as e:
print(e)
else:
raise RuntimeError('This should not happen!!')
mapping = {
'pp': 'SOLID',
'pd': 'LONG_DASH'
}
self.style.create_discrete_mapping(column=col,
col_type='String',
vp=vp, mappings=mapping)
dm = self.style.get_mapping(vp)
print(dm)
self.assertEqual('discrete', dm['mappingType'])
self.assertEqual(vp, dm['visualProperty'])
self.assertEqual('String', dm['mappingColumnType'])
self.assertEqual(col, dm['mappingColumn'])
self.assertIsNotNone(dm['map'])
dm_map = dm['map']
for entry in dm_map:
self.assertTrue(entry['key'] in mapping.keys())
self.assertEqual(entry['value'], mapping[entry['key']])
print('---------- Discrete mapping tests finished! -----------\n')
|
mit
|
DiCarloLab-Delft/PycQED_py3
|
pycqed/analysis/tools/data_manipulation.py
|
1
|
21438
|
"""
Part of the 'new' analysis toolbox.
This should contain all the functions that were previously/are now contained
in modules/analysis/analysis_toolbox.py in the Analysis tools section.
To give a feeling of what functions belong here, the old a_tools can be
roughly split into
- file-handling tools
- data manipulation tools
- plotting tools
"""
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
def count_rounds_to_error(series):
"""
returns the index of the first entry that is different
from the initial value.
input:
series : array or list
output:
        index of the first change (int)
        Returns np.NAN if no error is found
    NOTE: superseded by count_rtf_and_term_cond()
"""
last_s = series[0]
for i, s in enumerate(series):
if s == last_s:
last_s = s
else:
return i
print("Warning did not find any error")
return np.NAN
def count_rtf_and_term_cond(
series, only_count_min_1=False, return_termination_condition=True
):
"""
returns the index of the first entry that is different
from the initial value.
input:
series : array or list
output:
rounds to failure (int), termination condition (string)
    Returns the length of the time trace + 1 if no error is found
"""
rtf = len(series) + 1
termination_condition = None
initial_s = series[0]
for i, s in enumerate(series):
if s != initial_s:
rtf = i
if i == len(series) - 1:
# If termination occurs at last entry it is not possible
# to determine the cause of termination (note this should be
# a low probability event)
termination_condition = "unknown"
elif series[i + 1] == s:
termination_condition = "double event"
elif series[i + 1] != s:
termination_condition = "single event"
break
if only_count_min_1:
if initial_s == 1:
rtf = 1
if rtf == len(series) + 1:
print("Warning did not find a termination event")
if return_termination_condition:
return rtf, termination_condition
else:
return rtf
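# Worked example (hypothetical +/-1 encoded trace):
# count_rtf_and_term_cond([1, 1, -1, -1, 1]) returns (2, "double event"):
# the first deviation from the initial value occurs at index 2, and the
# following entry repeats it, so the termination is classified as a
# double event.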
def count_rounds_since_flip(series):
"""
Used to extract number of consecutive elements that are identical
input:
series : array or list
output:
rounds_since_change: list
"""
    round_since_last_change = 0  # starts at zero; the first loop iteration counts series[0] itself
last_s = series[0]
rounds_since_change = []
for i, s in enumerate(series):
if s == last_s:
round_since_last_change += 1
else:
rounds_since_change.append(round_since_last_change)
round_since_last_change = 1
last_s = s
return rounds_since_change
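# Worked example (hypothetical series): count_rounds_since_flip([0, 0, 0, 1, 1, 2])
# returns [3, 2] -- three identical entries before the first change and two
# before the second; the final run is not closed by a change and is not counted.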
def count_rounds_since_flip_split(series):
"""
Used to extract rounds since flip in a binary sequence flipping between
+1 and -1.
input:
series : array or list containing entries +1 and -1
output:
rounds_between_flips_m_to_p : list of consecutive entries in +1
rounds_between_flips_p_to_m : list of consecutive entries in -1
"""
nr_rounds_since_last_flip = 1
last_s = +1
rounds_between_flips_p_to_m = []
rounds_between_flips_m_to_p = []
for i, s in enumerate(series):
if s == last_s:
nr_rounds_since_last_flip += 1
else:
if s == +1:
rounds_between_flips_m_to_p.append(nr_rounds_since_last_flip)
elif s == -1:
rounds_between_flips_p_to_m.append(nr_rounds_since_last_flip)
else:
raise ValueError(
"Unexpected value in series," + " expect only +1 and -1"
)
nr_rounds_since_last_flip = 1
last_s = s
return rounds_between_flips_m_to_p, rounds_between_flips_p_to_m
def binary_derivative(series):
"""
Used to extract transitions between flipping and non-flipping
part of data traces.
When there is no change the value is 0.
If there is a change the value is 1.
"""
d_series = np.array(
[0 if series[i + 1] == series[i] else 1 for i in range(len(series) - 1)]
)
return d_series
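# Worked example (hypothetical series): binary_derivative([1, 1, -1, 1])
# returns array([0, 1, 1]) -- 0 where consecutive entries are equal,
# 1 where they change.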
def binary_derivative_old(series):
"""
Used to extract transitions between flipping and non-flipping
part of data traces.
"""
d_series = np.array(
[1 if series[i + 1] == series[i] else -1 for i in range(len(series) - 1)]
)
return d_series
def binary_derivative_2D(data_array, axis=0):
"""
Used to extract transitions between flipping and non-flipping
part of data traces along a certain axis
"""
if axis == 0:
dd_array = np.array([binary_derivative(line) for line in data_array])
elif axis == 1:
dd_array = np.array([binary_derivative(line) for line in data_array.T]).T
return dd_array
def butterfly_data_binning(Z, initial_state=0):
"""
notation of coefficients
    Pjk_i = P(1st msmt outcome j, 2nd msmt outcome k | input state i)
    epsj_i = eps(1st post-measurement state j | input state i)
"""
if initial_state == 0: # measurement induced excitation
# first is declared second is input state
eps0_0 = np.mean([1 if s == 1 else 0 for s in Z[:, 0]])
eps1_0 = 1 - eps0_0
P00_0 = np.mean([1 if (s_row[:2] == [1.0, 1.0]).all() else 0 for s_row in Z[:]])
P01_0 = np.mean(
[1 if (s_row[:2] == [1.0, -1.0]).all() else 0 for s_row in Z[:]]
)
P10_0 = np.mean(
[1 if (s_row[:2] == [-1.0, 1.0]).all() else 0 for s_row in Z[:]]
)
P11_0 = np.mean(
[1 if (s_row[:2] == [-1.0, -1.0]).all() else 0 for s_row in Z[:]]
)
return {
"eps0_0": eps0_0,
"eps1_0": eps1_0,
"P00_0": P00_0,
"P01_0": P01_0,
"P10_0": P10_0,
"P11_0": P11_0,
}
else: # measurement induced relaxation
# first is declared second is input state
eps0_1 = np.mean([1 if s == 1 else 0 for s in Z[:, 0]])
eps1_1 = 1 - eps0_1
P00_1 = np.mean([1 if (s_row[:2] == [1.0, 1.0]).all() else 0 for s_row in Z[:]])
P01_1 = np.mean(
[1 if (s_row[:2] == [1.0, -1.0]).all() else 0 for s_row in Z[:]]
)
P10_1 = np.mean(
[1 if (s_row[:2] == [-1.0, 1.0]).all() else 0 for s_row in Z[:]]
)
P11_1 = np.mean(
[1 if (s_row[:2] == [-1.0, -1.0]).all() else 0 for s_row in Z[:]]
)
return {
"eps0_1": eps0_1,
"eps1_1": eps1_1,
"P00_1": P00_1,
"P01_1": P01_1,
"P10_1": P10_1,
"P11_1": P11_1,
}
def butterfly_matrix_inversion(exc_coeffs, rel_coeffs):
# combines all coeffs in a single dictionary
rel_coeffs.update(exc_coeffs)
coeffs = rel_coeffs
matr = [[coeffs["eps0_0"], coeffs["eps0_1"]], [coeffs["eps1_0"], coeffs["eps1_1"]]]
inv_matr = np.linalg.inv(matr)
P_vec = [coeffs["P00_0"], coeffs["P01_0"]]
eps_vec = np.dot(inv_matr, P_vec)
[eps00_0, eps01_0] = eps_vec
P_vec = [coeffs["P10_0"], coeffs["P11_0"]]
eps_vec = np.dot(inv_matr, P_vec)
[eps10_0, eps11_0] = eps_vec
matr = [[coeffs["eps0_1"], coeffs["eps0_0"]], [coeffs["eps1_1"], coeffs["eps1_0"]]]
inv_matr = np.linalg.inv(matr)
P_vec = [coeffs["P00_1"], coeffs["P01_1"]]
eps_vec = np.dot(inv_matr, P_vec)
[eps01_1, eps00_1] = eps_vec
P_vec = [coeffs["P10_1"], coeffs["P11_1"]]
eps_vec = np.dot(inv_matr, P_vec)
[eps11_1, eps10_1] = eps_vec
return {
"eps00_0": eps00_0,
"eps01_0": eps01_0,
"eps10_0": eps10_0,
"eps11_0": eps11_0,
"eps00_1": eps00_1,
"eps01_1": eps01_1,
"eps10_1": eps10_1,
"eps11_1": eps11_1,
}
def digitize(
data, threshold: float, one_larger_than_threshold: bool = True, zero_state: int = -1
):
"""
    This function digitizes 2D arrays. When using postselection, postselect
    first if the threshold for postselection is more conservative than the
    threshold for digitization.
    Args:
        one_larger_than_threshold :
            if True, returns +1 for values at or above the threshold and
            (zero_state) for values below it; the inverse if False.
        zero_state (int) : how to denote the zero state; this should be either
            -1 (eigenvalue) or 0 (ground state).
"""
if one_larger_than_threshold:
data_digitized = np.asarray(
[
[1 if d_element >= threshold else zero_state for d_element in d_row]
for d_row in data
]
)
else:
data_digitized = np.asarray(
[
[1 if d_element <= threshold else zero_state for d_element in d_row]
for d_row in data
]
)
return data_digitized
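# Minimal usage sketch (hypothetical voltages): with a threshold of 0.0 and the
# default settings, values at or above the threshold map to +1 and the rest to
# the -1 eigenvalue; passing zero_state=0 labels the low side with 0 instead.
#     digitize(np.array([[0.2, -0.3], [-0.1, 0.4]]), threshold=0.0)
#     # -> array([[ 1, -1], [-1,  1]])
#     digitize(np.array([[0.2, -0.3], [-0.1, 0.4]]), threshold=0.0, zero_state=0)
#     # -> array([[1, 0], [0, 1]])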
def get_post_select_indices(thresholds, init_measurements, positive_case=True):
post_select_indices = []
for th, in_m in zip(thresholds, init_measurements):
if positive_case:
post_select_indices.append(np.where(in_m > th)[0])
else:
post_select_indices.append(np.where(in_m < th)[0])
post_select_indices = np.unique(np.concatenate(post_select_indices))
return post_select_indices
def postselect(data, threshold, positive_case=True):
data_postselected = []
if positive_case:
for data_row in data:
if data_row[0] <= threshold:
data_postselected.append(data_row)
else:
for data_row in data:
if data_row[0] >= threshold:
data_postselected.append(data_row)
return np.asarray(data_postselected)
def count_error_fractions(trace):
"""
The counters produce the same results as the CBox counters in
CBox.get_qubit_state_log_counters().
Requires a boolean array or an array of ints as input.
"""
no_err_counter = 0
single_err_counter = 0
double_err_counter = 0
zero_counter = 0
one_counter = 0
for i in range(len(trace)):
if i < (len(trace) - 1):
if trace[i] == trace[i + 1]:
# A single error is associated with a qubit error
single_err_counter += 1
if i < (len(trace) - 2):
if trace[i] == trace[i + 2]:
# If there are two errors in a row this is associated with
                        # a RO error, this counter must be subtracted from the
# single counter
double_err_counter += 1
else:
no_err_counter += 1
if trace[i] == 1:
zero_counter += 1
else:
one_counter += 1
return (
no_err_counter,
single_err_counter,
double_err_counter,
zero_counter,
one_counter,
)
def mark_errors_flipping(events):
"""
Marks error fractions
"""
single_errors = np.zeros(len(events) - 1)
double_errors = np.zeros(len(events) - 2)
for i in range(len(events) - 1):
# A single error is associated with a qubit error
if events[i] == events[i + 1]:
single_errors[i] = 1
if i < (len(events) - 2):
# two identical outcomes equal to one
if events[i] == events[i + 2]:
double_errors[i] = 1
return single_errors, double_errors
def mark_errors_constant(events):
"""
Marks error fractions
"""
single_errors = np.zeros(len(events) - 1)
double_errors = np.zeros(len(events) - 2)
for i in range(len(events) - 1):
# A single error is associated with a qubit error
if events[i] != events[i + 1]:
single_errors[i] = 1
if i < (len(events) - 2):
# two identical outcomes equal to one
if events[i + 1] != events[i + 2]:
double_errors[i] = 1
return single_errors, double_errors
def mark_errors_FB_to_ground(events):
"""
Marks error fractions
"""
single_errors = np.zeros(len(events) - 1)
double_errors = np.zeros(len(events) - 2)
for i in range(len(events) - 1):
# A single error is associated with a qubit error
if events[i] == 1:
single_errors[i] = 1
if i < (len(events) - 2):
# two identical outcomes equal to one
if events[i + 1] == 1:
double_errors[i] = 1
return single_errors, double_errors
def flatten_2D_histogram(H, xedges, yedges):
"""
Flattens a 2D histogram in preparation for fitting.
Input is the output of the np.histogram2d() command.
Inputs
H: 2D array of counts of shape (yrows, xcols)
xedges: 1D array of bin-edges along x
        yedges: 1D array of bin-edges along y
Returns
H_flat: flattened array of length (yrows*xcols)
x_tiled_flat: 1D array of bin-x-centers of length (yrows*xcols)
        y_rep_flat: 1D array of bin-y-centers of length (yrows*xcols)
"""
# Transpose because Histogram is H(yrows, xcols)
H_flat = H.T.flatten()
xstep = (xedges[1] - xedges[0]) / 2
ystep = (yedges[1] - yedges[0]) / 2
x = xedges[:-1] + xstep
y = yedges[:-1] + ystep
nr_rows = len(y)
nr_cols = len(x)
# tiling and rep is to make the indices match with the locations in the
# 2D grid of H
x_tiled_flat = np.tile(x, nr_cols)
y_rep_flat = np.repeat(y, nr_rows)
return H_flat, x_tiled_flat, y_rep_flat
def reject_outliers(data, m=6.0):
"""
Reject outliers function from stack overflow
http://stackoverflow.com/questions/11686720/is-there-a-numpy-builtin-to-reject-outliers-from-a-list
"""
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / mdev if mdev else 0.0
return data[s < m]
def rotation_matrix(angle, as_array=False):
"""
Returns a 2x2 rotation matrix based on an angle in degrees.
    rot_mat * vec (a column vector of shape (2, 1)) rotates the vector clockwise
"""
rot_mat = np.matrix(
[
[np.cos(2 * np.pi * angle / 360), -np.sin(2 * np.pi * angle / 360)],
[np.sin(2 * np.pi * angle / 360), np.cos(2 * np.pi * angle / 360)],
]
)
if as_array:
rot_mat = np.array(rot_mat)
return rot_mat
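# Quick numerical check of the convention above (illustrative only):
#     rotation_matrix(90, as_array=True).dot(np.array([[1.0], [0.0]]))
# gives approximately [[0.0], [1.0]], i.e. a 90 degree argument maps the unit
# x-vector onto the unit y-vector.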
def rotate_complex(complex_number, angle, deg=True):
"""
Rotates a complex number by an angle specified in degrees
"""
if deg:
angle = angle / 360 * 2 * np.pi
rotated_number = complex_number * np.exp(1j * angle)
return rotated_number
def get_outliers_fwd(x, threshold, plot_hist=False, ax=None):
dif = np.zeros(x.shape)
dif[1:] = x[1:] - x[:-1]
if plot_hist:
if ax is None:
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.hist(np.abs(dif[~np.isnan(dif)]))
ax.axvline(threshold, color="k")
return np.where(
np.logical_or((np.abs(dif) > threshold), (np.isnan(x))), True, False
)
def get_outliers_bwd(x, threshold, plot_hist=False, ax=None):
x = x[::-1]
dif = np.zeros(x.shape)
dif[1:] = x[1:] - x[:-1]
if plot_hist:
if ax is None:
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.hist(np.abs(dif[~np.isnan(dif)]))
ax.axvline(threshold, color="k")
return np.where(
np.logical_or((np.abs(dif) > threshold), (np.isnan(x))), True, False
)
def get_outliers(x, threshold):
return np.logical_and(
get_outliers_fwd(x, threshold), get_outliers_bwd(x, threshold)[::-1]
)
def get_generations_by_index(generation_indices, array):
"""
    Splits `array` into generations using a list of generation boundary indices.
    """
generations = []
current_gen_indices = deque([0], maxlen=2)
for idx in generation_indices:
current_gen_indices.append(int(idx))
generations.append(array[current_gen_indices[0] : current_gen_indices[1]])
return generations
def get_generation_means(generation_indices, array):
generations = get_generations_by_index(generation_indices, array)
means = [np.mean(gen) for gen in generations]
return means
def filter_resonator_visibility(
x,
y,
z,
deg=True,
cutoff_factor=0,
sav_windowlen_factor=None,
sav_polorder=4,
hipass_left=0.05,
hipass_right=0.1,
**kw
):
"""
    Filters resonator-dac sweeps on phase data to show only the resonator dips
    and remove the background.
    Inputs
    Optional
        deg: True/False, whether the data is in radians or degrees. The
            expected standard range is [-pi, pi] or [-180, 180], respectively.
        sav_windowlen_factor: Length of the window for the Savitzky-Golay
            filter, expressed as a factor of the total length of the data
            -> value between 0 and 1. Default is 0.1
        sav_polorder: Polyorder of the Savitzky-Golay filter. Default is 4
        hipass_left, hipass_right: Fraction of the data cut off on the left
            and right side of the high-pass filter, as the data is not shifted
"""
# cutoff in frequency space optional if high freq data is noisy
cutoff = round(len(x) * (1 - cutoff_factor))
x_cut = x[:cutoff]
restruct = []
# Go line by line for filtering
for i in range(len(z[0])):
ppcut = z[:cutoff, i]
# Pick type of data (deg or rad) to unwrap
# Expected rad standard range is [-pi,pi]
if deg:
ppcut_rad = np.deg2rad(ppcut) + np.pi
ppcut_unwrap = np.unwrap(ppcut_rad)
else:
ppcut_unwrap = np.unwrap(ppcut)
# Remove linear offset of unwrap
[a, b] = np.polyfit(x_cut, ppcut_unwrap, deg=1)
fit = a * x_cut + b
reduced = ppcut_unwrap - fit
        # Use Savitzky-Golay filter
if sav_windowlen_factor is None:
sav_windowlen_factor = round(0.1 * len(x) / 2) * 2 + 1
red_filt = sig.savgol_filter(
reduced, window_length=sav_windowlen_factor, polyorder=sav_polorder
)
# Flatten curve by removing the filtered signal
flat = reduced - red_filt
        # Poor man's high-pass filter:
        # FFT -> remove low-frequency components -> IFFT
llcut_f = np.fft.fft(flat)
left = round(hipass_left * len(x))
right = round(hipass_right * len(x))
# Cut and apply 'highpass filter'
llcut_f[:left] = [0] * left
llcut_f[-1 - right : -1] = [0] * right
        # Convert back from the Fourier domain
llcut_if = np.fft.ifft(llcut_f)
# Build new 2D dataset
restruct.append(llcut_if)
# Absolute value of complex IFFT result
restruct = np.abs(np.array(restruct))
return restruct
def populations_using_rate_equations(
SI: np.array, SX: np.array, V0: float, V1: float, V2: float
):
"""
Calculate populations using reference voltages.
Parameters:
-----------
SI : array
signal value for signal with I (Identity) added
SX : array
signal value for signal with X (π-pulse) added
V0 : float
Reference signal level for 0-state (calibration point).
V1 : float
Reference signal level for 1-state (calibration point).
V2 : float
Reference signal level for 2-state (calibration point).
Returns:
--------
P0 : array
population of the |0> state
P1 : array
population of the |1> state
P2 : array
population of the |2> state
M_inv : 2D array
Matrix inverse to find populations
Based on equation (S1) from Asaad & Dickel et al. npj Quant. Info. (2016)
To quantify leakage, we monitor the populations Pi of the three lowest
energy states (i ∈ {0, 1, 2}) and calculate the average
values <Pi>. To do this, we calibrate the average signal levels Vi for
the transmons in level i, and perform each measurement twice, the second
time with an added final π pulse on the 0–1 transition. This final π
pulse swaps P0 and P1, leaving P2 unaffected. Under the assumption that
higher levels are unpopulated (P0 +P1 +P2 = 1),
[V0 −V2, V1 −V2] [P0] = [S −V2]
[V1 −V2, V0 −V2] [P1] = [S' −V2]
where S (S') is the measured signal level without (with) final π pulse.
The populations are extracted by matrix inversion.
"""
M = np.array([[V0 - V2, V1 - V2], [V1 - V2, V0 - V2]])
M_inv = np.linalg.inv(M)
# using lists instead of preallocated array allows this to work
# with ufloats
P0 = []
P1 = []
for i, (sI, sX) in enumerate(zip(SI, SX)):
p0, p1 = np.dot(np.array([sI - V2, sX - V2]), M_inv)
P0.append(p0)
P1.append(p1)
# [2020-07-09 Victor] added compatibility with inputing complex IQ
# voltages in order to make rates equation work properly with "optimal IQ"
# RO mode, regardless of the orientation of the blobs on the IQ-plane
    # There might be a small imaginary part here in cases where the measured
    # SI or SX are points outside the triangle formed by the calibration
# points
P0 = np.real(P0)
P1 = np.real(P1)
P2 = 1 - P0 - P1
return P0, P1, P2, M_inv
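# Minimal usage sketch (hypothetical calibration levels and signals, assuming a
# single real-valued quadrature): the populations follow from the matrix
# inversion described in the docstring above. Not used elsewhere in the module.
def _rate_equations_example():
    SI = np.array([0.9, 0.5])  # made-up signals without the final pi pulse
    SX = np.array([0.2, 0.6])  # made-up signals with the final pi pulse
    P0, P1, P2, M_inv = populations_using_rate_equations(
        SI, SX, V0=1.0, V1=0.0, V2=-0.5
    )
    return P0, P1, P2  # each triple satisfies P0 + P1 + P2 = 1 by construction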
|
mit
|
joernhees/scikit-learn
|
sklearn/preprocessing/tests/test_data.py
|
30
|
61609
|
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
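# Minimal by-hand sketch of the transform exercised above (illustrative only,
# not used by the tests): StandardScaler centres each feature on its mean and
# divides by its standard deviation, leaving constant features unscaled.
def _standard_scale_by_hand_sketch(X):
    X = np.asarray(X, dtype=np.float64)
    std = X.std(axis=0)
    std = np.where(std == 0.0, 1.0, std)  # mirrors _handle_zeros_in_scale
    return (X - X.mean(axis=0)) / std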
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of the partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of the partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of the absolute values, they must agree to 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must agree to 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # effectively 'less than or equal'
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
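# Minimal by-hand sketch of the mapping verified above (illustrative only, not
# used by the tests): MinMaxScaler rescales each feature to
# feature_range = (lo, hi) via
#     X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
#     X_scaled = X_std * (hi - lo) + lo
# (constant features would additionally need the _handle_zeros_in_scale
# treatment).
def _minmax_by_hand_sketch(X, lo=0., hi=1.):
    X = np.asarray(X, dtype=np.float64)
    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    return X_std * (hi - lo) + lo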
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, r'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
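# As a concrete check (assuming numpy's default linear interpolation): the
# third column is [0.5, -0.1, 1.1], so its median is 0.5, its 25th percentile
# is 0.2 and its 75th percentile is 0.8, giving an IQR of 0.6. Scaling
# (x - 0.5) / 0.6 yields [0.0, -1.0, 1.0], which is X_expected below.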
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test max_abs at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test max_abs after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test max_abs until the end of partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
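# Background: centering a kernel matrix in feature space amounts to
# K_centered = K - 1n.K - K.1n + 1n.K.1n, where 1n is the n x n matrix whose
# entries are all 1/n; this is what KernelCenterer computes, and what the
# explicit StandardScaler(with_std=False) route reproduces below.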
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
|
bsd-3-clause
|
laijingtao/landlab
|
landlab/grid/base.py
|
1
|
187963
|
#! /usr/env/python
"""
Python implementation of ModelGrid, a base class used to create and manage
grids for 2D numerical models.
Getting Information about a Grid
--------------------------------
The following attributes, properties, and methods provide data about the grid,
its geometry, and the connectivity among the various elements. Each grid
element has an ID number, which is also its position in an array that
contains information about that type of element. For example, the *x*
coordinate of node 5 would be found at `grid.node_x[5]`.
The naming of grid-element arrays is *attribute*`_at_`*element*, where
*attribute* is the name of the data in question, and *element* is the element
to which the attribute applies. For example, the property `node_at_cell`
contains the ID of the node associated with each cell. For example,
`node_at_cell[3]` contains the *node ID* of the node associated with cell 3.
The *attribute* is singular if there is only one value per element; for
example, there is only one node associated with each cell. It is plural when
there are multiple values per element; for example, the `faces_at_cell` array
contains multiple faces for each cell. Exceptions to these general rules are
functions that return indices of a subset of all elements of a particular type.
For example, you can obtain an array with IDs of only the core nodes using
`core_nodes`, while `active_links` provides an array of IDs of active links
(only). Finally, attributes that represent a measurement of something, such as
the length of a link or the surface area of a cell, are described using `_of_`,
as in the example `area_of_cell`.
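For instance, a minimal illustrative sketch of this pattern (``grid`` here
stands for any existing model grid instance, and the index 3 is arbitrary):

.. code-block:: python

    node_id = grid.node_at_cell[3]    # singular: one node per cell
    face_ids = grid.faces_at_cell[3]  # plural: several faces per cell
    cell_area = grid.area_of_cell[3]  # a measurement, so named with _of_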
Information about the grid as a whole
+++++++++++++++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.axis_name
~landlab.grid.base.ModelGrid.axis_units
~landlab.grid.base.ModelGrid.move_origin
~landlab.grid.base.ModelGrid.ndim
~landlab.grid.base.ModelGrid.node_axis_coordinates
~landlab.grid.base.ModelGrid.number_of_elements
~landlab.grid.base.ModelGrid.size
Information about nodes
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.active_link_dirs_at_node
~landlab.grid.base.ModelGrid.active_neighbors_at_node
~landlab.grid.base.ModelGrid.all_node_azimuths_map
~landlab.grid.base.ModelGrid.all_node_distances_map
~landlab.grid.base.ModelGrid.boundary_nodes
~landlab.grid.base.ModelGrid.calc_distances_of_nodes_to_point
~landlab.grid.base.ModelGrid.cell_area_at_node
~landlab.grid.base.ModelGrid.cell_at_node
~landlab.grid.base.ModelGrid.closed_boundary_nodes
~landlab.grid.base.ModelGrid.core_nodes
~landlab.grid.base.ModelGrid.downwind_links_at_node
~landlab.grid.base.ModelGrid.fixed_gradient_boundary_nodes
~landlab.grid.base.ModelGrid.fixed_value_boundary_nodes
~landlab.grid.base.ModelGrid.link_at_node_is_downwind
~landlab.grid.base.ModelGrid.link_at_node_is_upwind
~landlab.grid.base.ModelGrid.link_dirs_at_node
~landlab.grid.base.ModelGrid.links_at_node
~landlab.grid.base.ModelGrid.neighbors_at_node
~landlab.grid.base.ModelGrid.node_at_cell
~landlab.grid.base.ModelGrid.node_at_core_cell
~landlab.grid.base.ModelGrid.node_at_link_head
~landlab.grid.base.ModelGrid.node_at_link_tail
~landlab.grid.base.ModelGrid.node_axis_coordinates
~landlab.grid.base.ModelGrid.node_is_boundary
~landlab.grid.base.ModelGrid.node_x
~landlab.grid.base.ModelGrid.node_y
~landlab.grid.base.ModelGrid.nodes
~landlab.grid.base.ModelGrid.number_of_core_nodes
~landlab.grid.base.ModelGrid.number_of_links_at_node
~landlab.grid.base.ModelGrid.number_of_nodes
~landlab.grid.base.ModelGrid.number_of_patches_present_at_node
~landlab.grid.base.ModelGrid.open_boundary_nodes
~landlab.grid.base.ModelGrid.patches_present_at_node
~landlab.grid.base.ModelGrid.set_nodata_nodes_to_closed
~landlab.grid.base.ModelGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.base.ModelGrid.status_at_node
~landlab.grid.base.ModelGrid.unit_vector_sum_xcomponent_at_node
~landlab.grid.base.ModelGrid.unit_vector_sum_ycomponent_at_node
~landlab.grid.base.ModelGrid.upwind_links_at_node
~landlab.grid.base.ModelGrid.x_of_node
~landlab.grid.base.ModelGrid.y_of_node
Information about links
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.active_link_dirs_at_node
~landlab.grid.base.ModelGrid.active_links
~landlab.grid.base.ModelGrid.angle_of_link
~landlab.grid.base.ModelGrid.angle_of_link_about_head
~landlab.grid.base.ModelGrid.downwind_links_at_node
~landlab.grid.base.ModelGrid.face_at_link
~landlab.grid.base.ModelGrid.fixed_links
~landlab.grid.base.ModelGrid.length_of_link
~landlab.grid.base.ModelGrid.link_at_face
~landlab.grid.base.ModelGrid.link_at_node_is_downwind
~landlab.grid.base.ModelGrid.link_at_node_is_upwind
~landlab.grid.base.ModelGrid.link_dirs_at_node
~landlab.grid.base.ModelGrid.links_at_node
~landlab.grid.base.ModelGrid.node_at_link_head
~landlab.grid.base.ModelGrid.node_at_link_tail
~landlab.grid.base.ModelGrid.number_of_active_links
~landlab.grid.base.ModelGrid.number_of_fixed_links
~landlab.grid.base.ModelGrid.number_of_links
~landlab.grid.base.ModelGrid.number_of_links_at_node
~landlab.grid.base.ModelGrid.number_of_patches_present_at_link
~landlab.grid.base.ModelGrid.patches_present_at_link
~landlab.grid.base.ModelGrid.resolve_values_on_active_links
~landlab.grid.base.ModelGrid.resolve_values_on_links
~landlab.grid.base.ModelGrid.status_at_link
~landlab.grid.base.ModelGrid.unit_vector_xcomponent_at_link
~landlab.grid.base.ModelGrid.unit_vector_ycomponent_at_link
~landlab.grid.base.ModelGrid.upwind_links_at_node
~landlab.grid.base.ModelGrid.x_of_link
~landlab.grid.base.ModelGrid.y_of_link
Information about cells
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.area_of_cell
~landlab.grid.base.ModelGrid.cell_area_at_node
~landlab.grid.base.ModelGrid.cell_at_node
~landlab.grid.base.ModelGrid.core_cells
~landlab.grid.base.ModelGrid.faces_at_cell
~landlab.grid.base.ModelGrid.node_at_cell
~landlab.grid.base.ModelGrid.node_at_core_cell
~landlab.grid.base.ModelGrid.number_of_cells
~landlab.grid.base.ModelGrid.number_of_core_cells
~landlab.grid.base.ModelGrid.number_of_faces_at_cell
~landlab.grid.base.ModelGrid.x_of_cell
~landlab.grid.base.ModelGrid.y_of_cell
Information about faces
+++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.active_faces
~landlab.grid.base.ModelGrid.face_at_link
~landlab.grid.base.ModelGrid.faces_at_cell
~landlab.grid.base.ModelGrid.link_at_face
~landlab.grid.base.ModelGrid.number_of_active_faces
~landlab.grid.base.ModelGrid.number_of_faces
~landlab.grid.base.ModelGrid.number_of_faces_at_cell
~landlab.grid.base.ModelGrid.width_of_face
~landlab.grid.base.ModelGrid.x_of_face
~landlab.grid.base.ModelGrid.y_of_face
Information about patches
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.number_of_patches_present_at_link
~landlab.grid.base.ModelGrid.number_of_patches_present_at_node
~landlab.grid.base.ModelGrid.patches_present_at_link
~landlab.grid.base.ModelGrid.patches_present_at_node
Information about corners
+++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.number_of_corners
Data Fields in ModelGrid
------------------------
:class:`~.ModelGrid` inherits from the :class:`~.ModelDataFields` class. This
provides `~.ModelGrid`, and its subclasses, with the ability to, optionally,
store data values that are associated with the different types grid elements
(nodes, cells, etc.). In particular, as part of ``ModelGrid.__init__()``,
data field *groups* are added to the `ModelGrid` that provide containers to
put data fields into. There is one group for each of the eight grid elements
(node, cell, link, face, core_node, core_cell, active_link, and active_face).
There is an additional group, *at_grid*, that can store arrays of length one,
intended as a place to store variables global to the grid.
To access these groups, use the same methods as accessing groups with
`~.ModelDataFields`. ``ModelGrid.__init__()`` adds the following attributes to
itself that provide access to the values groups:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.base.ModelGrid.at_node
~landlab.grid.base.ModelGrid.at_cell
~landlab.grid.base.ModelGrid.at_link
~landlab.grid.base.ModelGrid.at_face
~landlab.grid.base.ModelGrid.at_patch
~landlab.grid.base.ModelGrid.at_corner
~landlab.grid.base.ModelGrid.at_grid
Each of these attributes returns a ``dict``-like object whose keys are value
names as strings and values are numpy arrays that give quantities at
grid elements.
Create Field Arrays
+++++++++++++++++++
:class:`~.ModelGrid` inherits several useful methods for creating new data
fields and adding new data fields to a ModelGrid instance. Methods to add or
create a new data array follow the ``numpy`` syntax for creating arrays. The
following methods create and, optionally, initialize new arrays. These arrays
are of the correct size, but a new field will not be added to the grid:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.empty
~landlab.field.grouped.ModelDataFields.ones
~landlab.field.grouped.ModelDataFields.zeros
Add Fields to a ModelGrid
+++++++++++++++++++++++++
Unlike with the equivalent numpy functions, these do not take a size argument
as the size of the returned arrays is determined from the size of the
ModelGrid. However, the keyword arguments are the same as those of the numpy
equivalents.
The following methods will create a new array and add a reference to that
array to the ModelGrid:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.grid.base.ModelGrid.add_empty
~landlab.grid.base.ModelGrid.add_field
~landlab.grid.base.ModelGrid.add_ones
~landlab.grid.base.ModelGrid.add_zeros
~landlab.grid.base.ModelGrid.delete_field
~landlab.grid.base.ModelGrid.set_units
These methods operate in the same way as the previous set except that, in
addition to creating a new array, the newly-created array is added to the
ModelGrid. The calling signature is the same but with the addition of an
argument that gives the name of the new field as a string. The additional
method, :meth:`~.ModelDataFields.add_field`, adds a previously allocated
array to the ModelGrid. If the array is of the incorrect size it will raise
``ValueError``.
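A brief sketch of the difference (illustrative only; the Examples section
below demonstrates the same calls as doctests):

.. code-block:: python

    # create a new node array filled with zeros and attach it to the grid
    z = grid.add_zeros('node', 'topographic__elevation')
    # attach a previously allocated array; its size must match the element count
    t = np.zeros(grid.number_of_nodes)
    grid.add_field('node', 'air__temperature', t)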
Query Fields
++++++++++++
Use the following methods/attributes get information about the stored data
fields:
.. autosummary::
:toctree: generated/
:nosignatures:
~landlab.field.grouped.ModelDataFields.size
~landlab.field.grouped.ModelDataFields.keys
~landlab.field.grouped.ModelDataFields.has_group
~landlab.field.grouped.ModelDataFields.has_field
~landlab.grid.base.ModelGrid.field_units
~landlab.grid.base.ModelGrid.field_values
~landlab.field.grouped.ModelDataFields.groups
e.g., call ``mg.has_field('node', 'my_field_name')``.
# START HERE check that all functions listed below are included above,
# ignore ones that start with underscores(_)
Gradients, fluxes, and divergences on the grid
----------------------------------------------
Landlab is designed to easily calculate gradients in quantities across the
grid, and to construct fluxes and flux divergences from them. Because these
calculations tend to be a little more involved than property lookups, the
methods tend to start with `calc_`.
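As an illustrative sketch (assuming a node field named
``'topographic__elevation'`` already exists on the grid, and using an
arbitrary diffusivity of 0.01):

.. code-block:: python

    grad = grid.calc_grad_at_link('topographic__elevation')
    flux = -0.01 * grad                       # a simple linear-diffusion flux
    dzdt = -grid.calc_flux_div_at_node(flux)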
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.calc_diff_at_link
~landlab.grid.base.ModelGrid.calc_flux_div_at_node
~landlab.grid.base.ModelGrid.calc_grad_at_link
~landlab.grid.base.ModelGrid.calc_grad_at_patch
~landlab.grid.base.ModelGrid.calc_net_flux_at_node
~landlab.grid.base.ModelGrid.calc_slope_at_node
~landlab.grid.base.ModelGrid.calc_slope_at_patch
~landlab.grid.base.ModelGrid.calc_unit_normal_at_patch
Mappers
-------
These methods allow mapping of values defined on one grid element type onto a
second, e.g., mapping upwind node values onto links, or mean link values onto
nodes.
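For example (an illustrative sketch, again assuming a node field named
``'topographic__elevation'``):

.. code-block:: python

    # mean of the two end nodes of every link
    z_mean_at_link = grid.map_mean_of_link_nodes_to_link('topographic__elevation')
    # value at whichever end node of each link is larger
    z_max_at_link = grid.map_max_of_link_nodes_to_link('topographic__elevation')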
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.map_downwind_node_link_max_to_node
~landlab.grid.base.ModelGrid.map_downwind_node_link_mean_to_node
~landlab.grid.base.ModelGrid.map_link_head_node_to_link
~landlab.grid.base.ModelGrid.map_link_tail_node_to_link
~landlab.grid.base.ModelGrid.map_link_vector_sum_to_patch
~landlab.grid.base.ModelGrid.map_link_vector_to_nodes
~landlab.grid.base.ModelGrid.map_max_of_link_nodes_to_link
~landlab.grid.base.ModelGrid.map_max_of_node_links_to_node
~landlab.grid.base.ModelGrid.map_max_of_patch_nodes_to_patch
~landlab.grid.base.ModelGrid.map_mean_of_link_nodes_to_link
~landlab.grid.base.ModelGrid.map_mean_of_patch_nodes_to_patch
~landlab.grid.base.ModelGrid.map_min_of_link_nodes_to_link
~landlab.grid.base.ModelGrid.map_min_of_node_links_to_node
~landlab.grid.base.ModelGrid.map_min_of_patch_nodes_to_patch
~landlab.grid.base.ModelGrid.map_node_to_cell
~landlab.grid.base.ModelGrid.map_upwind_node_link_max_to_node
~landlab.grid.base.ModelGrid.map_upwind_node_link_mean_to_node
~landlab.grid.base.ModelGrid.map_value_at_downwind_node_link_max_to_node
~landlab.grid.base.ModelGrid.map_value_at_max_node_to_link
~landlab.grid.base.ModelGrid.map_value_at_min_node_to_link
~landlab.grid.base.ModelGrid.map_value_at_upwind_node_link_max_to_node
Boundary condition control
--------------------------
These are the primary properties for getting and setting the grid boundary
conditions. Changes made to :meth:`~.ModelGrid.status_at_node` will
automatically update the conditions defined at other grid elements.
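A minimal sketch of the usual workflow (shown here with a
:class:`~.RasterModelGrid`, whose edge helpers make the selection concise):

.. code-block:: python

    from landlab import RasterModelGrid, CLOSED_BOUNDARY

    grid = RasterModelGrid((4, 5))
    # close the top edge; the status of the attached links updates automatically
    grid.status_at_node[grid.nodes_at_top_edge] = CLOSED_BOUNDARY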
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.active_faces
~landlab.grid.base.ModelGrid.active_links
~landlab.grid.base.ModelGrid.active_neighbors_at_node
~landlab.grid.base.ModelGrid.boundary_nodes
~landlab.grid.base.ModelGrid.closed_boundary_nodes
~landlab.grid.base.ModelGrid.core_cells
~landlab.grid.base.ModelGrid.core_nodes
~landlab.grid.base.ModelGrid.fixed_gradient_boundary_nodes
~landlab.grid.base.ModelGrid.fixed_links
~landlab.grid.base.ModelGrid.fixed_value_boundary_nodes
~landlab.grid.base.ModelGrid.node_at_core_cell
~landlab.grid.base.ModelGrid.node_is_boundary
~landlab.grid.base.ModelGrid.number_of_active_faces
~landlab.grid.base.ModelGrid.number_of_active_links
~landlab.grid.base.ModelGrid.number_of_core_cells
~landlab.grid.base.ModelGrid.number_of_core_nodes
~landlab.grid.base.ModelGrid.number_of_fixed_links
~landlab.grid.base.ModelGrid.number_of_patches_present_at_link
~landlab.grid.base.ModelGrid.number_of_patches_present_at_node
~landlab.grid.base.ModelGrid.open_boundary_nodes
~landlab.grid.base.ModelGrid.set_nodata_nodes_to_closed
~landlab.grid.base.ModelGrid.set_nodata_nodes_to_fixed_gradient
~landlab.grid.base.ModelGrid.status_at_link
~landlab.grid.base.ModelGrid.status_at_node
Identifying node subsets
------------------------
These methods are useful in identifying subsets of nodes, e.g., closest node
to a point; nodes at edges.
(None are available for this grid type)
Surface analysis
----------------
These methods permit the kinds of surface analysis that you might expect to
find in GIS software.
.. autosummary::
:toctree: generated/
~landlab.grid.base.ModelGrid.calc_aspect_at_node
~landlab.grid.base.ModelGrid.calc_slope_at_node
~landlab.grid.base.ModelGrid.calc_hillshade_at_node
Notes
-----
When creating a new grid class that inherits from ``ModelGrid``, it is
important to call ``ModelGrid.__init__()`` in the new grid's
``__init__()``. For example, the new class's ``__init__`` should contain the
following code:
.. code-block:: python

    class NewGrid(ModelGrid):
        def __init__(self, *args, **kwds):
            ModelGrid.__init__(self, **kwds)
            # Code that initializes the NewGrid
Without this, the new grid class will not have the ``at_*`` attributes.
Examples
--------
Although the following examples use a :class:`~.RasterModelGrid`, they apply
equally to any grid that inherits from :class:`~.ModelGrid`. A new grid
comes with a set of pre-defined value groups, one for each grid element.
Use the ``groups`` attribute to see the group names.
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> groups = list(grid.groups)
>>> groups.sort()
>>> groups
['cell', 'corner', 'face', 'grid', 'link', 'node', 'patch']
Create Field Arrays
+++++++++++++++++++
If you just want to create an array but not add it to the grid, you can use
the :meth:`~.ModelGrid.ones` method.
>>> grid.ones(at='node')
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> list(grid.at_node.keys()) # Nothing has been added to the grid
[]
Add Field Arrays
++++++++++++++++
Use the ``add_*`` methods to add value arrays attached to grid elements. Each
of these methods accepts two arguments. The first is the name of the grid
element with which the values are associated and the second is the name of
the quantity. The quantity name must be unique within a group, but the same
quantity can appear in multiple groups.
>>> list(grid.at_node.keys())  # There are no values defined at grid nodes
[]
>>> z = grid.add_ones('node', 'topographic__elevation')
We now see that the array has been added to the grid as a reference to the
array returned by ``add_ones``.
>>> list(grid.at_node.keys())
['topographic__elevation']
>>> grid.at_node['topographic__elevation']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> z is grid.at_node['topographic__elevation']
True
To add a previously created array to the grid, use the
:meth:`~.ModelGrid.add_field` method but be aware that it must be of the
correct size (if it's not a ``ValueError`` will be raised).
>>> grid.has_field('node', 'air__temperature')
False
>>> import numpy as np
>>> t = np.zeros(9)
>>> t is grid.add_field('node', 'air__temperature', t)
True
>>> grid.has_field('node', 'air__temperature')
True
>>> grid.has_field('cell', 'air__temperature')
False
>>> t is grid.at_node['air__temperature']
True
"""
import numpy
import numpy as np
import warnings
from time import time
import six
from six.moves import range
from landlab.testing.decorators import track_this_method
from landlab.utils import count_repeated_values
from landlab.core.utils import argsort_points_by_x_then_y
from landlab.utils.decorators import make_return_array_immutable, deprecated
from landlab.field import ModelDataFields, ModelDataFieldsMixIn
from landlab.field.scalar_data_fields import FieldError
from landlab.core import load_params  # needed by ModelGrid.from_file below
from . import grid_funcs as gfuncs
from ..core.utils import as_id_array
from ..core.utils import add_module_functions_to_class
from .decorators import (override_array_setitem_and_reset, return_id_array,
return_readonly_id_array)
#: Indicates an index is, in some way, *bad*.
BAD_INDEX_VALUE = -1
# DEJH thinks the user should be able to override this value if they want
# Map names grid elements to the ModelGrid attribute that contains the count
# of that element in the grid.
_ARRAY_LENGTH_ATTRIBUTES = {
'node': 'number_of_nodes',
'patch': 'number_of_patches',
'link': 'number_of_links',
'corner': 'number_of_corners',
'face': 'number_of_faces',
'cell': 'number_of_cells',
'active_link': 'number_of_active_links',
'active_face': 'number_of_active_faces',
'core_node': 'number_of_core_nodes',
'core_cell': 'number_of_core_cells',
}
# Fields whose sizes can not change.
_SIZED_FIELDS = {'node', 'link', 'patch', 'corner', 'face', 'cell', }
# Define the boundary-type codes
#: Indicates a node is *core*.
CORE_NODE = 0
#: Indicates a boundary node has a fixed value.
FIXED_VALUE_BOUNDARY = 1
#: Indicates a boundary node has a fixed gradient.
FIXED_GRADIENT_BOUNDARY = 2
#: Indicates a boundary node is wrap-around.
LOOPED_BOUNDARY = 3
#: Indicates a boundary node is closed
CLOSED_BOUNDARY = 4
# Define the link types
#: Indicates a link is *active*, and can carry flux
ACTIVE_LINK = 0
#: Indicates a link has a fixed (gradient) value, & behaves as a boundary
FIXED_LINK = 2
#: Indicates a link is *inactive*, and cannot carry flux
INACTIVE_LINK = 4
BOUNDARY_STATUS_FLAGS_LIST = [
FIXED_VALUE_BOUNDARY,
FIXED_GRADIENT_BOUNDARY,
LOOPED_BOUNDARY,
CLOSED_BOUNDARY,
]
BOUNDARY_STATUS_FLAGS = set(BOUNDARY_STATUS_FLAGS_LIST)
LINK_STATUS_FLAGS_LIST = [
ACTIVE_LINK,
FIXED_LINK,
INACTIVE_LINK,
]
LINK_STATUS_FLAGS = set(LINK_STATUS_FLAGS_LIST)
def _sort_points_into_quadrants(x, y, nodes):
"""Divide x, y points into quadrants.
Divide points with locations given in the *x*, and *y* arrays into north,
south, east, and west quadrants. Returns nodes contained in quadrants
(west, east, north, south).
Parameters
----------
x : array_like
X-coordinates of points.
y : array_like
Y-coordinates of points.
nodes : array_like
Nodes associated with points.
Returns
-------
tuple of array_like
Tuple of nodes in each coordinate. Nodes are grouped as
(*east*, *north*, *west*, *south*).
Examples
--------
>>> import numpy as np
>>> from landlab.grid.base import _sort_points_into_quadrants
>>> x = np.array([0, 1, 0, -1])
>>> y = np.array([1, 0, -1, 0])
>>> nodes = np.array([1, 2, 3, 4])
>>> _sort_points_into_quadrants(x, y, nodes)
(array([2]), array([1]), array([4]), array([3]))
"""
above_x_axis = y > 0
right_of_y_axis = x > 0
closer_to_y_axis = numpy.abs(y) >= numpy.abs(x)
north_nodes = nodes[above_x_axis & closer_to_y_axis]
south_nodes = nodes[(~ above_x_axis) & closer_to_y_axis]
east_nodes = nodes[right_of_y_axis & (~ closer_to_y_axis)]
west_nodes = nodes[(~ right_of_y_axis) & (~ closer_to_y_axis)]
return (east_nodes, north_nodes, west_nodes, south_nodes)
def _default_axis_names(n_dims):
"""Name of each axis.
Parameters
----------
n_dims : int
Number of spatial dimensions.
Returns
-------
tuple of str
Name of each axis.
Examples
--------
>>> from landlab.grid.base import _default_axis_names
>>> _default_axis_names(1)
('x',)
>>> _default_axis_names(2)
('y', 'x')
>>> _default_axis_names(3)
('z', 'y', 'x')
"""
_DEFAULT_NAMES = ('z', 'y', 'x')
return _DEFAULT_NAMES[- n_dims:]
def _default_axis_units(n_dims):
"""Unit names for each axis.
Parameters
----------
n_dims : int
Number of spatial dimensions.
Returns
-------
tuple of str
Units of each axis.
Examples
--------
>>> from landlab.grid.base import _default_axis_units
>>> _default_axis_units(1)
('-',)
>>> _default_axis_units(2)
('-', '-')
>>> _default_axis_units(3)
('-', '-', '-')
"""
return ('-', ) * n_dims
def find_true_vector_from_link_vector_pair(L1, L2, b1x, b1y, b2x, b2y):
r"""Separate a pair of links with vector values into x and y components.
The concept here is that a pair of adjacent links attached to a node are
projections of a 'true' but unknown vector. This function finds and returns
the x and y components of this true vector. The trivial case is the
situation in which the two links are orthogonal and aligned with the grid
axes, in which case the vectors of these two links *are* the x and y
components.
Parameters
----------
L1, L2 : float
Values (magnitudes) associated with the two links
b1x, b1y, b2x, b2y : float
Unit vectors of the two links
Returns
-------
ax, ay : float
x and y components of the 'true' vector
Notes
-----
The function does an inverse vector projection. Suppose we have a given
'true' vector :math:`a`, and we want to project it onto two other lines
with unit vectors (b1x,b1y) and (b2x,b2y). In the context of Landlab,
the 'true' vector is some unknown vector quantity, which might for
example represent the local water flow velocity. The lines represent two
adjacent links in the grid.
Let :math:`\mathbf{a}` be the true vector, :math:`\mathbf{B}` be a
different vector with unit vector :math:`\mathbf{b}`, and :math:`L`
be the scalar projection of *a* onto *B*. Then,
.. math::

    L = \mathbf{a} \cdot \mathbf{b} = a_x b_x + a_y b_y,
where :math:`(a_x,a_y)` are the components of **a** and :math:`(b_x,b_y)`
are the components of the unit vector **b**.
In this case, we know *b* (the link unit vector), and we want to know the
*x* and *y* components of **a**. The problem is that we have one equation
and two unknowns (:math:`a_x` and :math:`a_y`). But we can solve this if
we have *two* vectors, both of which are projections of **a**. Using the
subscripts 1 and 2 to denote the two vectors, we can obtain equations for
both :math:`a_x` and :math:`a_y`:
.. math::

    a_x = L_1 / b_{1x} - a_y b_{1y} / b_{1x}

    a_y = L_2 / b_{2y} - a_x b_{2x} / b_{2y}
Substituting the second into the first,
.. math::

    a_x = [L_1/b_{1x}-L_2 b_{1y}/(b_{1x} b_{2y})] / [1-b_{1y} b_{2x}/(b_{1x} b_{2y})]
Hence, we find the original vector :math:`(a_x,a_y)` from two links with
unit vectors :math:`(b_{1x},b_{1y})` and :math:`(b_{2x},b_{2y})` and
associated values :math:`L_1` and :math:`L_2`.
Note that the above equations require that :math:`b_{1x}>0` and
:math:`b_{2y}>0`. If this isn't the case, we invert the order of the two
links, which requires :math:`b_{2x}>0` and :math:`b_{1y}>0`. If none of
these conditions is met, then we have a degenerate case.
Examples
--------
The following example represents the active links in a 7-node hexagonal
grid, with just one core node. The 'true' vector has a magnitude of 5 units
and an orientation of 30 degrees, pointing up and to the right (i.e., the
positive-x and positive-y quadrant), so that its vector components are 4 (x)
and 3 (y) (in other words, it is a 3-4-5 triangle). The values assigned to
L below are the projections of that true vector onto the six link
vectors. The algorithm should recover the correct vector component
values of 4 and 3. The FOR loop examines each pair of links in turn.
>>> import numpy as np
>>> from landlab.grid.base import find_true_vector_from_link_vector_pair
>>> bx = np.array([0.5, -0.5, -1., -0.5, 1., 0.5])
>>> by = np.array([0.866, 0.866, 0., -0.866, 0., -0.866])
>>> L = np.array([4.6, 0.6, -4., -4.6, 4., -0.6])
>>> for i in range(5):
... ax, ay = find_true_vector_from_link_vector_pair(
... L[i], L[i+1], bx[i], by[i], bx[i+1], by[i+1])
... round(ax,1), round(ay,1)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
"""
assert ((b1x != 0 and b2y != 0) or (b2x != 0 and b1y != 0)), \
'Improper unit vectors'
if b1x != 0. and b2y != 0.:
ax = (L1 / b1x - L2 * (b1y / (b1x * b2y))) / \
(1. - (b1y * b2x) / (b1x * b2y))
ay = L2 / b2y - ax * (b2x / b2y)
elif b2x != 0. and b1y != 0.:
ax = (L2 / b2x - L1 * (b2y / (b2x * b1y))) / \
(1. - (b2y * b1x) / (b2x * b1y))
ay = L1 / b1y - ax * (b1x / b1y)
return ax, ay
class ModelGrid(ModelDataFieldsMixIn):
"""Base class for 2D structured or unstructured grids for numerical models.
The idea is to have at least two inherited
classes, RasterModelGrid and DelaunayModelGrid, that can create and
manage grids. To this might be added a GenericModelGrid, which would
be an unstructured polygonal grid that doesn't necessarily obey or
understand the Delaunay triangulation, but rather simply accepts
an input grid from the user. There is also a :class:`~.HexModelGrid` for
hexagonal grids.
Attributes
----------
at_node : dict-like
Values at nodes.
at_cell : dict-like
Values at cells.
at_link : dict-like
Values at links.
at_face : dict-like
Values at faces.
at_grid : dict-like
Values global to the grid.
Other Parameters
----------------
axis_name : tuple, optional
Name of axes
axis_units : tuple, optional
Units of coordinates
"""
# Debugging flags (if True, activates some output statements)
_DEBUG_VERBOSE = False
_DEBUG_TRACK_METHODS = False
at_node = {} # : Values defined at nodes
at_link = {} # : Values defined at links
at_patch = {} # : Values defined at patches
at_corner = {} # : Values defined at corners
at_face = {} # : Values defined at faces
at_cell = {} # : Values defined at cells
# : Nodes on the other end of links pointing into a node.
_node_inlink_matrix = numpy.array([], dtype=numpy.int32)
# : Nodes on the other end of links pointing out of a node.
_node_outlink_matrix = numpy.array([], dtype=numpy.int32)
def __init__(self, **kwds):
super(ModelGrid, self).__init__()
self.axis_name = kwds.get('axis_name', _default_axis_names(self.ndim))
self.axis_units = kwds.get(
'axis_units', _default_axis_units(self.ndim))
self._link_length = None
self._all_node_distances_map = None
self._all_node_azimuths_map = None
self._node_unit_vector_sum_x = None
self._node_unit_vector_sum_y = None
self._link_unit_vec_x = None
self._link_unit_vec_y = None
self.bc_set_code = 0
# Sort links according to the x and y coordinates of their midpoints.
# Assumes 1) node_at_link_tail and node_at_link_head have been
# created, and 2) so have node_x and node_y.
# self._sort_links_by_midpoint()
for loc in _SIZED_FIELDS:
size = self.number_of_elements(loc)
ModelDataFields.new_field_location(self, loc, size=size)
ModelDataFields.new_field_location(self, 'grid', size=1)
# for loc in _UNSIZED_FIELDS:
# ModelDataFields.new_field_location(self, loc, size=None)
ModelDataFields.set_default_group(self, 'node')
def _create_link_face_coords(self):
"""Create x, y coordinates for link-face intersections.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4), 1.)
>>> mg.x_of_link
array([ 0.5, 1.5, 2.5, 0. , 1. , 2. , 3. , 0.5, 1.5, 2.5,
0. , 1. , 2. , 3. , 0.5, 1.5, 2.5])
>>> mg.y_of_link
array([ 0. , 0. , 0. , 0.5, 0.5, 0.5, 0.5, 1. , 1. , 1. ,
1.5, 1.5, 1.5, 1.5, 2. , 2. , 2. ])
>>> np.all(mg.x_of_link[mg.link_at_face] == mg.x_of_face)
True
>>> np.all(mg.y_of_link[mg.link_at_face] == mg.y_of_face)
True
"""
self._link_x = (self.x_of_node[self.node_at_link_head] +
self.x_of_node[self.node_at_link_tail])/2.
self._link_y = (self.y_of_node[self.node_at_link_head] +
self.y_of_node[self.node_at_link_tail])/2.
def _create_neighbor_list(self, **kwds):
"""Create list of neighbor node IDs.
Creates a list of IDs of neighbor nodes for each node, as a
2D array. Only record neighbor nodes that are on the other end of an
*active* link. Nodes attached to *inactive* links or neighbor nodes
that would be outside of the grid are given an ID of
:const:`~landlab.grid.base.BAD_INDEX_VALUE`.
Neighbors are ordered as [*right*, *top*, *left*, *bottom*].
"""
self._active_neighbor_nodes = self.neighbors_at_node.copy()
self._active_neighbor_nodes[
self.active_link_dirs_at_node == 0] = BAD_INDEX_VALUE
self.neighbor_list_created = True
return self._active_neighbor_nodes
@classmethod
def from_file(cls, file_like):
params = load_params(file_like)
return cls.from_dict(params)
@classmethod
def from_dict(cls, params):
raise NotImplementedError('from_dict')
def _initialize(self):
raise NotImplementedError('_initialize')
@property
def ndim(self):
"""Number of spatial dimensions of the grid.
LLCATS: GINF
"""
return 2
def _setup_nodes(self):
"""Set up the node id array."""
self._nodes = np.arange(self.number_of_nodes, dtype=int)
return self._nodes
@property
@make_return_array_immutable
def nodes(self):
"""Get node ids for the grid.
Examples
--------
>>> from landlab import RadialModelGrid
>>> mg = RadialModelGrid(num_shells=1)
>>> mg.nodes
array([0, 1, 2, 3, 4, 5, 6])
LLCATS: NINF
"""
try:
return self._nodes
except AttributeError:
return self._setup_nodes()
@property
@override_array_setitem_and_reset('_update_links_nodes_cells_to_new_BCs')
def status_at_node(self):
"""Get array of the boundary status for each node.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY, FIXED_LINK
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node.reshape((4, 5))
array([[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]], dtype=int8)
>>> np.any(mg.status_at_link == FIXED_LINK)
False
>>> mg.status_at_node[mg.nodes_at_left_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.status_at_node.reshape((4, 5))
array([[2, 1, 1, 1, 1],
[2, 0, 0, 0, 1],
[2, 0, 0, 0, 1],
[2, 1, 1, 1, 1]], dtype=int8)
>>> np.any(mg.status_at_link == FIXED_LINK) # links auto-update
True
LLCATS: NINF BC
"""
return self._node_status
@status_at_node.setter
def status_at_node(self, new_status):
"""Set the array of node boundary statuses."""
self._node_status[:] = new_status[:]
self._update_links_nodes_cells_to_new_BCs()
@property
@make_return_array_immutable
def neighbors_at_node(self):
"""Get neighboring nodes.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 3))
>>> neighbors = grid.neighbors_at_node.copy()
>>> neighbors[neighbors == BAD_INDEX_VALUE] = -1
>>> neighbors # doctest: +NORMALIZE_WHITESPACE
array([[ 1, 3, -1, -1], [ 2, 4, 0, -1], [-1, 5, 1, -1],
[ 4, 6, -1, 0], [ 5, 7, 3, 1], [-1, 8, 4, 2],
[ 7, 9, -1, 3], [ 8, 10, 6, 4], [-1, 11, 7, 5],
[10, -1, -1, 6], [11, -1, 9, 7], [-1, -1, 10, 8]])
LLCATS: NINF CONN
"""
return self._neighbors_at_node
@property
@return_readonly_id_array
def active_neighbors_at_node(self):
"""Get list of neighbor node IDs.
Return lists of neighbor nodes, where the neighbor is connected by an
active link. For each node, the list gives neighbor ids as [right, top,
left, bottom]. Nodes at the end of inactive links or nodes in missing
positions get BAD_INDEX_VALUE.
Examples
--------
>>> from landlab.grid.base import BAD_INDEX_VALUE as X
>>> from landlab import RasterModelGrid, HexModelGrid, CLOSED_BOUNDARY
>>> rmg = RasterModelGrid((4, 5))
>>> np.array_equal(rmg.active_neighbors_at_node[[-1, 6, 2]],
... [[X, X, X, X], [ 7, 11, 5, 1], [X, 7, X, X]])
True
>>> rmg.active_neighbors_at_node[7]
array([ 8, 12, 6, 2])
>>> rmg.active_neighbors_at_node[2]
array([-1, 7, -1, -1])
>>> hmg = HexModelGrid(3, 2)
>>> hmg.status_at_node[0] = CLOSED_BOUNDARY
>>> hmg.active_neighbors_at_node
array([[-1, -1, -1, -1, -1, -1],
[-1, 3, -1, -1, -1, -1],
[ 3, -1, -1, -1, -1, -1],
[ 4, 6, 5, 2, -1, 1],
[-1, 3, -1, -1, -1, -1],
[-1, -1, 3, -1, -1, -1],
[-1, 3, -1, -1, -1, -1]])
LLCATS: NINF CONN BC
"""
try:
return self._active_neighbor_nodes
except AttributeError:
self._active_neighbor_nodes = self._create_neighbor_list()
return self._active_neighbor_nodes
@property
@make_return_array_immutable
def links_at_node(self):
"""Get links of nodes.
Returns
-------
(NODES, LINKS) ndarray of int
Link for the nodes of a grid. The shape of the matrix will be
number of nodes rows by max number of links per node. Order is
anticlockwise from east.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 3))
>>> grid.links_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [-1, 4, 1, -1],
[ 5, 7, -1, 2], [ 6, 8, 5, 3], [-1, 9, 6, 4],
[10, 12, -1, 7], [11, 13, 10, 8], [-1, 14, 11, 9],
[15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
>>> grid.links_at_node[4]
array([6, 8, 5, 3])
>>> grid.links_at_node[(4, 7), :]
array([[ 6, 8, 5, 3], [11, 13, 10, 8]])
LLCATS: NINF LINF CONN
"""
return self._links_at_node
@property
@make_return_array_immutable
def link_dirs_at_node(self):
"""Link directions at each node: 1=incoming, -1=outgoing, 0=none.
Returns
-------
(NODES, LINKS) ndarray of int
Link directions relative to the nodes of a grid. The shape of the
matrix will be number of nodes rows by max number of links per
node. A zero indicates no link at this position.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 3))
>>> grid.link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, 0, 0], [-1, -1, 1, 0], [ 0, -1, 1, 0],
[-1, -1, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 1],
[-1, -1, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 1],
[-1, 0, 0, 1], [-1, 0, 1, 1], [ 0, 0, 1, 1]],
dtype=int8)
>>> grid.link_dirs_at_node[4]
array([-1, -1, 1, 1], dtype=int8)
>>> grid.link_dirs_at_node[(4, 7), :]
array([[-1, -1, 1, 1],
[-1, -1, 1, 1]], dtype=int8)
LLCATS: NINF LINF CONN
"""
return self._link_dirs_at_node
@property
@make_return_array_immutable
def active_link_dirs_at_node(self):
"""
Link flux directions at each node: 1=incoming flux, -1=outgoing
flux, 0=no flux. Note that inactive links receive zero, but active
and fixed links are both reported normally.
Returns
-------
(NODES, LINKS) ndarray of int
Link directions relative to the nodes of a grid. The shape of the
matrix will be number of nodes rows by max number of links per
node. A zero indicates no link at this position.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 3))
>>> grid.status_at_node[grid.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> grid.active_link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 0, 0, 0], [ 0, -1, 0, 0], [ 0, 0, 0, 0],
[ 0, 0, 0, 0], [-1, -1, 0, 1], [ 0, 0, 1, 0],
[ 0, 0, 0, 0], [-1, -1, 0, 1], [ 0, 0, 1, 0],
[ 0, 0, 0, 0], [ 0, 0, 0, 1], [ 0, 0, 0, 0]],
dtype=int8)
LLCATS: NINF LINF CONN
"""
return self._active_link_dirs_at_node
@property
def node_at_cell(self):
"""Node ID associated with grid cells.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 5))
>>> grid.node_at_cell # doctest: +NORMALIZE_WHITESPACE
array([ 6, 7, 8,
11, 12, 13])
LLCATS: NINF CINF CONN
"""
return self._node_at_cell
@property
def cell_at_node(self):
"""Node ID associated with grid cells.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 5))
>>> ids = grid.cell_at_node
>>> ids[ids == BAD_INDEX_VALUE] = -1
>>> ids # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
-1, 0, 1, 2, -1,
-1, 3, 4, 5, -1,
-1, -1, -1, -1, -1])
LLCATS: CINF NINF CONN
"""
return self._cell_at_node
@property
@return_readonly_id_array
def core_nodes(self):
"""Get array of core nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.core_nodes
array([ 6, 7, 8, 11, 12, 13])
LLCATS: NINF BC
"""
try:
return self._core_nodes
except AttributeError:
(core_node_ids, ) = numpy.where(self._node_status == CORE_NODE)
return core_node_ids
@property
@return_readonly_id_array
def boundary_nodes(self):
"""Get array of boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.boundary_nodes
array([ 0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
try:
return self._boundary_nodes
except AttributeError:
(boundary_node_ids, ) = numpy.where(self._node_status != CORE_NODE)
return boundary_node_ids
@property
@return_readonly_id_array
def open_boundary_nodes(self):
"""Get array of open boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge,
... mg.nodes_at_bottom_edge):
... mg.status_at_node[edge] = CLOSED_BOUNDARY
>>> mg.open_boundary_nodes
array([16, 17, 18])
LLCATS: NINF BC
"""
(open_boundary_node_ids, ) = numpy.where(
(self._node_status != CLOSED_BOUNDARY) &
(self._node_status != CORE_NODE))
return open_boundary_node_ids
@property
@return_readonly_id_array
def closed_boundary_nodes(self):
"""Get array of closed boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.closed_boundary_nodes
array([15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
(closed_boundary_node_ids, ) = numpy.where(
self._node_status == CLOSED_BOUNDARY)
return closed_boundary_node_ids
@property
@return_readonly_id_array
def fixed_gradient_boundary_nodes(self):
"""Get array of fixed gradient boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_top_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.fixed_gradient_boundary_nodes
array([15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
(fixed_gradient_boundary_node_ids, ) = numpy.where(
self._node_status == FIXED_GRADIENT_BOUNDARY)
return fixed_gradient_boundary_node_ids
@property
@return_readonly_id_array
def fixed_gradient_boundary_node_fixed_link(self):
"""
An array of the fixed_links connected to fixed gradient boundary nodes.
Note that on a raster, some nodes (notably the corners) can be
FIXED_GRADIENT_BOUNDARY, but not have a true FIXED_LINK neighboring
link. In such cases, the link returned will be a closed link joining
the corner node to a neighboring FIXED_GRADIENT_BOUNDARY node (see
example).
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> leftedge = grid.nodes_at_left_edge
>>> grid.status_at_node[leftedge] = FIXED_GRADIENT_BOUNDARY
>>> grid.fixed_gradient_boundary_nodes
array([0, 4, 8])
>>> grid.fixed_gradient_boundary_node_fixed_link
array([ 3, 7, 10])
"""
try:
return self._fixed_gradient_boundary_node_links
except AttributeError:
self._create_fixed_gradient_boundary_node_links()
return self._fixed_gradient_boundary_node_links
@property
@return_readonly_id_array
def fixed_gradient_boundary_node_anchor_node(self):
"""
Returns the node at the other end of the fixed link for a fixed
gradient boundary node.
Degenerate FIXED_GRADIENT_BOUNDARY nodes (e.g., corners) are handled as
in :func:`fixed_gradient_boundary_node_fixed_link`, by pointing to a
neighboring FIXED_GRADIENT_BOUNDARY node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> leftedge = grid.nodes_at_left_edge
>>> grid.status_at_node[leftedge] = FIXED_GRADIENT_BOUNDARY
>>> grid.fixed_gradient_boundary_nodes
array([0, 4, 8])
>>> grid.fixed_gradient_boundary_node_fixed_link
array([ 3, 7, 10])
>>> grid.fixed_gradient_boundary_node_anchor_node
array([4, 5, 4])
"""
try:
return self._fixed_gradient_boundary_node_anchor_node
except AttributeError:
self._create_fixed_gradient_boundary_node_anchor_node()
return self._fixed_gradient_boundary_node_anchor_node
def _create_fixed_gradient_boundary_node_links(self):
"""
Builds a data structure to hold the fixed_links which control the
values of any FIXED_GRADIENT_BOUNDARY nodes in the grid.
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
"""
self._fixed_grad_links_created = True
self._fixed_gradient_boundary_node_links = np.empty_like(
self.fixed_gradient_boundary_nodes, dtype=int)
fix_nodes = self.fixed_gradient_boundary_nodes
neighbor_links = self.links_at_node[fix_nodes] # -1s
boundary_exists = self.link_dirs_at_node[fix_nodes]
# next line retains -1 indexes
link_stat_badind = self.status_at_link[neighbor_links] == FIXED_LINK
true_connection = np.logical_and(link_stat_badind, boundary_exists)
true_fix_nodes = true_connection.sum(axis=1).astype(bool)
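# true_fix_nodes flags fixed-gradient boundary nodes that have at least
# one genuine FIXED_LINK attached; the remaining (corner-type) nodes are
# handled below.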
self._fixed_gradient_boundary_node_links[true_fix_nodes] = (
neighbor_links[true_connection])
# resolve any corner nodes
neighbor_nodes = self.neighbors_at_node[fix_nodes] # BAD_INDEX_VALUEs
neighbor_nodes[neighbor_nodes == BAD_INDEX_VALUE] = -1
fixed_grad_neighbor = np.logical_and((self.status_at_node[
neighbor_nodes] == FIXED_GRADIENT_BOUNDARY), boundary_exists)
# ^True when FIXED_GRADIENT_BOUNDARY for real
# winnow it down to only one possibility for fixed_grad neighbor:
which_neighbor = np.argmax(fixed_grad_neighbor, axis=1)
indexing_range = np.arange(fixed_grad_neighbor.shape[0])
a_link_to_fixed_grad = neighbor_links[indexing_range, which_neighbor]
corners = np.logical_not(true_fix_nodes)
assert np.all(
fixed_grad_neighbor[indexing_range, which_neighbor][corners])
self._fixed_gradient_boundary_node_links[
corners] = a_link_to_fixed_grad[corners]
def _create_fixed_gradient_boundary_node_anchor_node(self):
"""
Builds a data structure to hold the nodes which anchor the
values of any FIXED_GRADIENT_BOUNDARY nodes in the grid, i.e., those
at the other ends of the FIXED_LINKS.
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
"""
self._fixed_grad_links_created = True
fix_grad_nodes = self.fixed_gradient_boundary_nodes
self._fixed_gradient_boundary_node_anchor_node = np.empty_like(
fix_grad_nodes)
heads_and_tails = np.empty((fix_grad_nodes.size, 2))
which_one = np.empty_like(heads_and_tails, dtype=bool)
heads_and_tails[:, 0] = self.node_at_link_head[
self.fixed_gradient_boundary_node_fixed_link]
heads_and_tails[:, 1] = self.node_at_link_tail[
self.fixed_gradient_boundary_node_fixed_link]
which_one[:, 0] = heads_and_tails[:, 0] == fix_grad_nodes
which_one[:, 1] = heads_and_tails[:, 1] == fix_grad_nodes
assert np.all(which_one.sum(axis=1) == 1)
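# Each fixed link touches its fixed-gradient boundary node at exactly one
# end (checked by the assert above); the node at the opposite end is the
# anchor node.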
self._fixed_gradient_boundary_node_anchor_node = heads_and_tails[
np.logical_not(which_one)]
@property
@return_readonly_id_array
def fixed_value_boundary_nodes(self):
"""Get array of fixed value boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge,
... mg.nodes_at_bottom_edge):
... mg.status_at_node[edge] = CLOSED_BOUNDARY
>>> mg.fixed_value_boundary_nodes
array([16, 17, 18])
LLCATS: NINF BC
"""
(fixed_value_boundary_node_ids, ) = numpy.where(
self._node_status == FIXED_VALUE_BOUNDARY)
return fixed_value_boundary_node_ids
@property
@return_readonly_id_array
def active_faces(self):
"""Get array of active faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.active_faces
array([0, 1, 2, 3, 4, 5, 6])
>>> from landlab import CLOSED_BOUNDARY
>>> grid.status_at_node[6] = CLOSED_BOUNDARY
>>> grid.active_faces
array([0, 2, 5])
LLCATS: FINF BC
"""
try:
return self._active_faces
except AttributeError:
self._create_active_faces()
return self._active_faces
@property
@return_readonly_id_array
def active_links(self):
"""Get array of active links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.active_links
array([ 4, 5, 7, 8, 9, 11, 12])
LLCATS: LINF BC
"""
try:
return self._active_links
except AttributeError:
self._reset_link_status_list()
return self._active_links
@property
@return_readonly_id_array
def fixed_links(self):
"""Get array of fixed links.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1,
1, 0, 0, 1,
1, 1, 1, 1], dtype=int8)
>>> grid.fixed_links.size
0
>>> grid.status_at_node[:4] = FIXED_GRADIENT_BOUNDARY
>>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2,
1, 0, 0, 1,
1, 1, 1, 1], dtype=int8)
>>> grid.fixed_links
array([4, 5])
LLCATS: LINF BC
"""
try:
return self._fixed_links
except AttributeError:
self._reset_link_status_list()
return self._fixed_links
@property
@return_readonly_id_array
def node_at_core_cell(self):
"""Get array of nodes associated with core cells.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[8] = CLOSED_BOUNDARY
>>> mg.node_at_core_cell
array([ 6, 7, 11, 12, 13])
LLCATS: NINF CINF BC CONN
"""
(core_cell_ids, ) = numpy.where(self._node_status == CORE_NODE)
return core_cell_ids
@property
def core_cells(self):
"""Get array of core cells.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[8] = CLOSED_BOUNDARY
>>> mg.core_cells
array([0, 1, 3, 4, 5])
LLCATS: CINF BC
"""
return self._core_cells
@property
def node_at_link_head(self):
"""Get array of the node at each link head (*to-node*).
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.node_at_link_head[:5]
array([1, 2, 3, 4, 5])
LLCATS: NINF LINF CONN
"""
return self._node_at_link_head
@property
def node_at_link_tail(self):
"""Get array of the node at each link tail (*from-node*).
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.node_at_link_tail[:5]
array([0, 1, 2, 3, 0])
LLCATS: NINF LINF CONN
"""
return self._node_at_link_tail
@property
def face_at_link(self):
"""Get array of faces associated with links.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.face_at_link[5:7]
array([0, 1])
>>> np.all(mg.face_at_link[:5]==BAD_INDEX_VALUE)
True
LLCATS: FINF LINF CONN
"""
try:
return self._face_at_link
except AttributeError:
return self._create_face_at_link()
@property
def link_at_face(self):
"""Get array of links associated with faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.link_at_face[0:3]
array([5, 6, 7])
LLCATS: LINF FINF CONN
"""
try:
return self._link_at_face
except AttributeError:
return self._create_link_at_face()
@property
def number_of_nodes(self):
"""Total number of nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_nodes
20
LLCATS: NINF
"""
return len(self._cell_at_node)
@property
def number_of_corners(self):
"""Total number of nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_corners
12
LLCATS: CNINF
"""
return self.number_of_patches
@property
def number_of_cells(self):
"""Total number of cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_cells
6
LLCATS: CINF
"""
return len(self._node_at_cell)
@property
def number_of_links(self):
"""Total number of links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_links
17
LLCATS: LINF
"""
return self._status_at_link.size
@property
def number_of_faces(self):
"""Total number of faces.
Returns
-------
int
Total number of faces in the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_faces
7
LLCATS: FINF
"""
return len(self.link_at_face)
@property
def number_of_active_faces(self):
"""Total number of active faces.
Returns
-------
int
Total number of active faces in the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_active_faces
7
The number of active faces is updated when a node status changes.
>>> from landlab import CLOSED_BOUNDARY
>>> grid.status_at_node[6] = CLOSED_BOUNDARY
>>> grid.number_of_active_faces
3
LLCATS: FINF BC
"""
return self.active_faces.size
@property
def number_of_core_nodes(self):
"""Number of core nodes.
The number of core nodes on the grid (i.e., excluding all boundary
nodes).
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_core_nodes
6
>>> grid.status_at_node[7] = CLOSED_BOUNDARY
>>> grid.number_of_core_nodes
5
LLCATS: NINF BC
"""
return self._core_nodes.size
@property
def number_of_core_cells(self):
"""Number of core cells.
A core cell excludes all boundary cells.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_core_cells
6
>>> grid.status_at_node[7] = CLOSED_BOUNDARY
>>> grid.number_of_core_cells
5
LLCATS: CINF BC
"""
return self._core_cells.size
@property
def number_of_active_links(self):
"""Number of active links.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_active_links
17
>>> for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge,
... mg.nodes_at_bottom_edge):
... mg.status_at_node[edge] = CLOSED_BOUNDARY
>>> mg.number_of_active_links
10
LLCATS: LINF BC
"""
return self.active_links.size
@property
def number_of_fixed_links(self):
"""Number of fixed links.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_fixed_links
0
>>> mg.status_at_node[mg.nodes_at_top_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.number_of_fixed_links
3
LLCATS: LINF BC
"""
try:
return self._fixed_links.size
except AttributeError:
self._reset_link_status_list()
return self._fixed_links.size
def number_of_elements(self, name):
"""Number of instances of an element.
Get the number of instances of a grid element in a grid.
Parameters
----------
name : {'node', 'cell', 'link', 'face', 'core_node', 'core_cell',
'active_link', 'active_face'}
Name of the grid element.
Returns
-------
int
Number of elements in the grid.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_elements('node')
20
>>> mg.number_of_elements('core_cell')
6
>>> mg.number_of_elements('link')
31
>>> mg.number_of_elements('active_link')
17
>>> mg.status_at_node[8] = CLOSED_BOUNDARY
>>> mg.number_of_elements('link')
31
>>> mg.number_of_elements('active_link')
13
LLCATS: GINF
"""
try:
return getattr(self, _ARRAY_LENGTH_ATTRIBUTES[name])
except KeyError:
raise TypeError(
'{name}: element name not understood'.format(name=name))
@property
@make_return_array_immutable
def node_x(self):
"""Get array of the x-coordinates of nodes.
See also
--------
x_of_node
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.node_x.reshape((4, 5))
array([[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.]])
LLCATS: NINF MEAS
"""
return self._node_x
@property
@make_return_array_immutable
def node_y(self):
"""Get array of the y-coordinates of nodes.
See also
--------
y_of_node
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.node_y.reshape((4, 5))
array([[ 0., 0., 0., 0., 0.],
[ 2., 2., 2., 2., 2.],
[ 4., 4., 4., 4., 4.],
[ 6., 6., 6., 6., 6.]])
LLCATS: NINF MEAS
"""
return self._node_y
@property
@make_return_array_immutable
def x_of_node(self):
"""Get array of the x-coordinates of nodes.
See also
--------
node_x
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_node.reshape((4, 5))
array([[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.]])
LLCATS: NINF MEAS
"""
return self._node_x
@property
@make_return_array_immutable
def y_of_node(self):
"""Get array of the y-coordinates of nodes.
See also
--------
node_y
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_node.reshape((4, 5))
array([[ 0., 0., 0., 0., 0.],
[ 2., 2., 2., 2., 2.],
[ 4., 4., 4., 4., 4.],
[ 6., 6., 6., 6., 6.]])
LLCATS: NINF MEAS
"""
return self._node_y
@property
@make_return_array_immutable
def x_of_cell(self):
"""Get array of the x-coordinates of nodes at cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_cell.reshape((2, 3))
array([[ 3., 6., 9.],
[ 3., 6., 9.]])
LLCATS: CINF MEAS
"""
return self._node_x[self.node_at_cell]
@property
@make_return_array_immutable
def y_of_cell(self):
"""Get array of the y-coordinates of nodes at cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_cell.reshape((2, 3))
array([[ 2., 2., 2.],
[ 4., 4., 4.]])
LLCATS: CINF MEAS
"""
return self._node_y[self.node_at_cell]
@property
@make_return_array_immutable
def x_of_link(self):
"""Get array of the x-coordinates of link midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_link # doctest: +NORMALIZE_WHITESPACE
array([ 1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5])
LLCATS: LINF MEAS
"""
try:
return self._link_x
except AttributeError:
self._create_link_face_coords()
return self._link_x
@property
@make_return_array_immutable
def y_of_link(self):
"""Get array of the y-coordinates of link midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_link # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0., 1., 1., 1., 1., 1.,
2., 2., 2., 2., 3., 3., 3., 3., 3.,
4., 4., 4., 4., 5., 5., 5., 5., 5.,
6., 6., 6., 6.])
LLCATS: LINF MEAS
"""
try:
return self._link_y
except AttributeError:
self._create_link_face_coords()
return self._link_y
@property
@make_return_array_immutable
def x_of_face(self):
"""Get array of the x-coordinates of face midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_face # doctest: +NORMALIZE_WHITESPACE
array([ 3. , 6. , 9. , 1.5, 4.5, 7.5, 10.5,
3. , 6. , 9. , 1.5, 4.5, 7.5, 10.5,
3. , 6. , 9. ])
LLCATS: FINF MEAS
"""
try:
return self._link_x[self.link_at_face]
except AttributeError:
self._create_link_face_coords()
return self._link_x[self.link_at_face]
@property
@make_return_array_immutable
def y_of_face(self):
"""Get array of the y-coordinates of face midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_face # doctest: +NORMALIZE_WHITESPACE
array([ 1., 1., 1., 2., 2., 2., 2., 3., 3., 3.,
4., 4., 4., 4., 5., 5., 5.])
LLCATS: FINF MEAS
"""
try:
return self._link_y[self.link_at_face]
except AttributeError:
self._create_link_face_coords()
return self._link_y[self.link_at_face]
@make_return_array_immutable
def node_axis_coordinates(self, axis=0):
"""Get the coordinates of nodes along a particular axis.
Return node coordinates from a given *axis* (defaulting to 0). Axis
numbering is the same as that for numpy arrays. That is, the zeroth
axis is along the rows, and the first along the columns.
Parameters
----------
axis : int, optional
Coordinate axis.
Returns
-------
ndarray
Coordinates of nodes for a given axis.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.node_axis_coordinates(0) # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0., 0.,
1., 1., 1., 1., 1.,
2., 2., 2., 2., 2.,
3., 3., 3., 3., 3.])
>>> grid.node_axis_coordinates(1) # doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.])
LLCATS: GINF NINF MEAS
"""
AXES = ('node_y', 'node_x')
try:
return getattr(self, AXES[axis])
except IndexError:
raise ValueError("'axis' entry is out of bounds")
@property
def axis_units(self):
"""Get units for each axis.
Returns
-------
tuple of str
The units (as a string) for each of a grid's coordinates.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.axis_units
('-', '-')
>>> mg.axis_units = ('km', 'km')
>>> mg.axis_units
('km', 'km')
LLCATS: GINF
"""
return self._axis_units
@axis_units.setter
def axis_units(self, new_units):
"""Set the units for each coordinate axis."""
if len(new_units) != self.ndim:
raise ValueError('length of units does not match grid dimension')
self._axis_units = tuple(new_units)
@property
def axis_name(self):
"""Get the name of each coordinate axis.
Returns
-------
tuple of str
The names of each axis.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.axis_name
('y', 'x')
>>> grid.axis_name = ('lon', 'lat')
>>> grid.axis_name
('lon', 'lat')
LLCATS: GINF
"""
return self._axis_name
@axis_name.setter
def axis_name(self, new_names):
"""Set the names of a grid's coordinate axes.
Raises
------
ValueError
If the number of dimensions does not match.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.axis_name = ('lon', 'lat')
>>> grid.axis_name
('lon', 'lat')
"""
if len(new_names) != self.ndim:
raise ValueError('length of names does not match grid dimension')
self._axis_name = tuple(new_names)
@property
@make_return_array_immutable
def status_at_link(self):
"""Get array of the status of all links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> mg.status_at_node[mg.nodes_at_right_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.status_at_link # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4, 0, 0, 0, 4, 4, 0, 0, 2, 4, 0, 0, 0, 4, 4, 0, 0,
2, 4, 0, 0, 0, 4, 4, 4, 4, 4])
LLCATS: BC LINF
"""
return self._status_at_link
@property
@return_readonly_id_array
def link_at_face(self):
"""Get links associated with faces.
Returns an array of the link IDs for the links that intersect
faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.link_at_face
array([ 4, 5, 7, 8, 9, 11, 12])
LLCATS: LINF FINF MEAS
"""
try:
return self._link_at_face
except AttributeError:
return self._create_link_at_face()
def _create_number_of_links_at_node(self):
"""Find and record how many links are attached to each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.number_of_links_at_node
array([2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 2])
"""
self._number_of_links_at_node = np.zeros(self.number_of_nodes,
dtype=np.int)
for ln in range(self.number_of_links):
self._number_of_links_at_node[self.node_at_link_tail[ln]] += 1
self._number_of_links_at_node[self.node_at_link_head[ln]] += 1
@property
def number_of_links_at_node(self):
"""Number of links connected to each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.number_of_links_at_node
array([2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 2])
LLCATS: LINF NINF CONN
"""
try:
return self._number_of_links_at_node
except AttributeError:
self._create_number_of_links_at_node()
return self._number_of_links_at_node
def _create_links_and_link_dirs_at_node(self):
"""Make arrays with links and link directions at each node.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg.links_at_node
array([[ 0, 3, 2, -1, -1, -1],
[ 1, 5, 4, 0, -1, -1],
[ 7, 6, 1, -1, -1, -1],
[ 8, 11, 2, -1, -1, -1],
[ 9, 13, 12, 8, 3, 4],
[10, 15, 14, 9, 5, 6],
[16, 10, 7, -1, -1, -1],
[17, 11, 12, -1, -1, -1],
[18, 17, 13, 14, -1, -1],
[18, 15, 16, -1, -1, -1]])
>>> hg.link_dirs_at_node
array([[-1, -1, -1, 0, 0, 0],
[-1, -1, -1, 1, 0, 0],
[-1, -1, 1, 0, 0, 0],
[-1, -1, 1, 0, 0, 0],
[-1, -1, -1, 1, 1, 1],
[-1, -1, -1, 1, 1, 1],
[-1, 1, 1, 0, 0, 0],
[-1, 1, 1, 0, 0, 0],
[-1, 1, 1, 1, 0, 0],
[ 1, 1, 1, 0, 0, 0]], dtype=int8)
"""
# Find maximum number of links per node
nlpn = self.number_of_links_at_node
# ^this fn should become member and property
max_num_links = np.amax(nlpn)
nlpn[:] = 0 # we'll zero it out, then rebuild it
# Create arrays for link-at-node information
self._links_at_node = - np.ones((self.number_of_nodes, max_num_links),
dtype=int)
self._link_dirs_at_node = np.zeros((self.number_of_nodes,
max_num_links), dtype=np.int8)
# Sweep over all links
for lk in range(self.number_of_links):
# Find the IDs of the tail and head nodes
t = self.node_at_link_tail[lk]
h = self.node_at_link_head[lk]
# Add this link to the list for this node, set the direction
# (outgoing, indicated by -1), and increment the number found so
# far
self._links_at_node[t][nlpn[t]] = lk
self._links_at_node[h][nlpn[h]] = lk
self._link_dirs_at_node[t][nlpn[t]] = -1
self._link_dirs_at_node[h][nlpn[h]] = 1
nlpn[t] += 1
nlpn[h] += 1
# Sort the links at each node by angle, counter-clockwise from +x
self._sort_links_at_node_by_angle()
# setup the active link equivalent
self._active_link_dirs_at_node = self._link_dirs_at_node.copy()
inactive_links = (self.status_at_link[self.links_at_node] ==
INACTIVE_LINK)
inactive_links[self.link_dirs_at_node == 0] = False
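# Padding entries in links_at_node hold -1, which would index the last
# link above, so positions with link_dirs_at_node == 0 are excluded before
# zeroing the directions of inactive links.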
self._active_link_dirs_at_node[inactive_links] = 0
@deprecated(use='vals[links_at_node]*active_link_dirs_at_node',
version=1.0)
def _active_links_at_node(self, *args):
"""_active_links_at_node([node_ids])
Active links of a node.
Parameters
----------
node_ids : int or list of ints
ID(s) of node(s) for which to find connected active links
Returns
-------
(M, N) ndarray
The ids of active links attached to grid nodes with
*node_ids*. If *node_ids* is not given, return links for all of
the nodes in the grid. M is the number of rows in the grid's
_node_active_inlink_matrix, which can vary depending on the type
and structure of the grid; in a hex grid, for example, it is 6.
Notes
-----
On its way to being obsolete. **Deprecated**.
LLCATS: DEPR LINF NINF CONN
"""
if len(args) == 0:
return numpy.vstack((self._node_active_inlink_matrix,
self._node_active_outlink_matrix))
elif len(args) == 1:
node_ids = numpy.broadcast_arrays(args[0])[0]
return numpy.vstack(
(self._node_active_inlink_matrix[:, node_ids],
self._node_active_outlink_matrix[:, node_ids])
).reshape(2 * numpy.size(self._node_active_inlink_matrix, 0), -1)
else:
raise ValueError('only zero or one arguments accepted')
@deprecated(use='vals[links_at_node]*active_link_dirs_at_node',
version=1.0)
def _active_links_at_node2(self, *args):
"""_active_links_at_node2([node_ids])
Get active links attached to nodes.
Parameters
----------
node_ids : int or list of ints (optional)
ID(s) of node(s) for which to find connected active links.
(Default: all nodes)
Returns
-------
(M, N) ndarray
The link IDs of active links attached to grid nodes with
*node_ids*. If *node_ids* is not given, return links for all of
the nodes in the grid. M is the number of rows in the grid's
_node_active_inlink_matrix, which can vary depending on the type
and structure of the grid; in a hex grid, for example, it is 6.
Examples
--------
>>> from landlab import HexModelGrid
>>> hmg = HexModelGrid(3, 2)
>>> hmg._active_links_at_node2(3)
array([[ 2],
[ 3],
[ 5],
[-1],
[-1],
[-1],
[ 6],
[ 8],
[ 9],
[-1],
[-1],
[-1]])
>>> hmg._active_links_at_node2()
array([[-1, -1, -1, 2, 6, 8, 9],
[-1, -1, -1, 3, -1, -1, -1],
[-1, -1, -1, 5, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[ 2, 3, 5, 6, -1, -1, -1],
[-1, -1, -1, 8, -1, -1, -1],
[-1, -1, -1, 9, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]])
LLCATS: DEPR NINF LINF CONN
"""
if len(args) == 0:
return numpy.vstack((self._node_active_inlink_matrix2,
self._node_active_outlink_matrix2))
elif len(args) == 1:
node_ids = numpy.broadcast_arrays(args[0])[0]
return numpy.vstack(
(self._node_active_inlink_matrix2[:, node_ids],
self._node_active_outlink_matrix2[:, node_ids])
).reshape(2 * numpy.size(self._node_active_inlink_matrix2, 0), -1)
else:
raise ValueError('only zero or one arguments accepted')
@property
@make_return_array_immutable
def angle_of_link(self):
"""Find and return the angle of a link about the node at the link tail.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.angle_of_link / np.pi * 3. # 60 degree segments
array([ 0., 2., 1., 2., 1., 0., 0., 1., 2., 1., 2., 0.])
LLCATS: LINF MEAS
"""
try:
if not self._angle_of_link_created:
self._create_angle_of_link()
except AttributeError:
self._create_angle_of_link()
return self._angle_of_link_bothends[-1]
@property
@make_return_array_immutable
def angle_of_link_about_head(self):
"""Find and return the angle of a link about the node at the link head.
Because links have direction, their angle can be specified as an angle
about either the node at the link head, or the node at the link tail.
The default behaviour of `angle_of_link` is to return the angle about
the link tail, but this method gives the angle about the link head.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.angle_of_link_about_head[:3] / np.pi * 3. # 60 deg segments
array([ 3., 5., 4.])
LLCATS: LINF MEAS
"""
try:
if not self._angle_of_link_created:
self._create_angle_of_link()
except AttributeError:
self._create_angle_of_link()
return self._angle_of_link_bothends[1]
def _create_angle_of_link(self):
"""
Build a dict with keys (-1, 1) that contains the angles of the links
about both the link heads (1) and link tails (-1).
Notes
-----
dx and dy are the x and y differences between the link endpoints.
Multiplying this by dirs orients these offsets correctly (i.e.,
the correct node is the origin). The call to arctan2 calculates
the angle in radians. Angles in the lower two quadrants will be
negative and clockwise from the positive x axis. We want them
counter-clockwise, which is what the wrap into the range [0, 2*pi)
at the end of the loop accomplishes.
LLCATS: LINF MEAS
"""
self._angle_of_link_bothends = {}
for dirs in (-1, 1):
dx = -dirs * (self.node_x[self.node_at_link_head] -
self.node_x[self.node_at_link_tail])
dy = -dirs * (self.node_y[self.node_at_link_head] -
self.node_y[self.node_at_link_tail])
ang = np.arctan2(dy, dx)
(lower_two_quads, ) = np.where(ang < 0.0)
ang[lower_two_quads] = (2 * np.pi) + ang[lower_two_quads]
(no_link, ) = np.where(dirs == 0)
ang[no_link] = 2*np.pi
self._angle_of_link_bothends[dirs] = ang.copy()
self._angle_of_link_created = True
def _sort_links_at_node_by_angle(self):
"""Sort the links_at_node and link_dirs_at_node arrays by angle.
"""
ang = self.angle_of_link[self.links_at_node]
linkhead_at_node = self.link_dirs_at_node == 1
ang[linkhead_at_node] = self.angle_of_link_about_head[
self.links_at_node[linkhead_at_node]]
ang[self.link_dirs_at_node == 0] = 100.
argsorted = np.argsort(ang, axis=1)
indices = np.indices(ang.shape)[0] * ang.shape[1] + argsorted
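# argsorted holds the within-row ordering; adding each row's offset turns
# it into indices into the flattened (n_nodes * max_links) arrays, so both
# tables can be permuted with a single flat assignment.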
self._links_at_node.flat = self._links_at_node.flat[indices.flatten()]
self._link_dirs_at_node.flat = self._link_dirs_at_node.flat[
indices.flatten()]
def resolve_values_on_links(self, link_values, out=None):
"""Resolve the xy-components of links.
Resolves values defined on links into the x and y directions.
Returns values_along_x, values_along_y
LLCATS: LINF
"""
return gfuncs.resolve_values_on_links(self, link_values, out=out)
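# Illustrative usage (names hypothetical): for a grid `grid` and an array
# `q` of values defined at links,
#     q_x, q_y = grid.resolve_values_on_links(q)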
@deprecated(use='no replacement', version=1.0)
def resolve_values_on_active_links(self, link_values, out=None):
"""Resolve the xy-components of active links.
Resolves values defined on active links into the x and y
directions.
Returns values_along_x, values_along_y
LLCATS: LINF
"""
return gfuncs.resolve_values_on_active_links(self, link_values,
out=out)
def link_at_node_is_upwind(self, values, out=None):
"""
Return a boolean array the same shape as :func:`links_at_node` that
flags links which are upwind of the node as True.
link_at_node_is_upwind iterates across the grid and identifies the link
values at each link connected to a node. It then uses the
link_dirs_at_node data structure to identify links bringing flux into
the node. It then returns a boolean array the same shape as
links_at_node flagging these links. For example, for a raster, the
returned array will have shape (nnodes, 4).
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Must be correct shape and boolean dtype.
Returns
-------
ndarray
Boolean of which links are upwind at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.link_at_node_is_upwind('grad')
array([[False, False, False, False],
[False, False, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, True],
[False, False, True, True],
[False, False, True, True],
[False, False, True, True],
[False, False, False, True],
[False, False, True, True],
[False, False, True, True],
[False, False, True, True]], dtype=bool)
LLCATS: LINF NINF CONN
"""
if out is None:
out = np.empty_like(self.links_at_node, dtype=bool)
else:
assert out.shape == self.links_at_node.shape
assert out.dtype == bool
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
np.less(values_at_links, 0., out=out)
return out
def link_at_node_is_downwind(self, values, out=None):
"""
Return a boolean array the same shape as :func:`links_at_node` that
flags links which are downwind of the node as True.
link_at_node_is_downwind iterates across the grid and identifies the
link values at each link connected to a node. It then uses the
link_dirs_at_node data structure to identify links carrying flux out of
the node. It then returns a boolean array the same shape as
links_at_node flagging these links. For example, for a raster, the
returned array will have shape (nnodes, 4).
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Must be correct shape and boolean dtype.
Returns
-------
ndarray
Boolean of which links are downwind at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.link_at_node_is_downwind('grad')
array([[ True, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[False, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[False, True, False, False],
[ True, False, False, False],
[ True, False, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
LLCATS: LINF NINF CONN
"""
if out is None:
out = np.empty_like(self.links_at_node, dtype=bool)
else:
assert out.shape == self.links_at_node.shape
assert out.dtype == bool
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
np.greater(values_at_links, 0., out=out)
return out
def upwind_links_at_node(self, values, bad_index=-1):
"""
Return an (nnodes, X) shape array of the IDs of the links that are
upwind of each node, according to *values* (field or array).
X is the maximum upwind links at any node. Nodes with fewer upwind
links than this have additional slots filled with *bad_index*. Links
are ordered anticlockwise from east.
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
bad_index : int
Index to place in array indicating no link.
Returns
-------
ndarray
Array of upwind link IDs
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.upwind_links_at_node('grad', bad_index=-1)
array([[-1, -1],
[ 0, -1],
[ 1, -1],
[ 2, -1],
[ 3, -1],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[10, -1],
[14, 11],
[15, 12],
[16, 13]])
LLCATS: LINF NINF CONN
"""
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
unordered_IDs = np.where(values_at_links < 0., self.links_at_node,
bad_index)
bad_IDs = unordered_IDs == bad_index
nnodes = self.number_of_nodes
flat_sorter = (np.argsort(bad_IDs, axis=1) +
self.links_at_node.shape[1] *
np.arange(nnodes).reshape((nnodes, 1)))
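# Sorting the boolean mask pushes valid link IDs (False entries) to the
# front of each row; the row offsets convert those per-row positions into
# indices into the flattened ID array.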
big_ordered_array = unordered_IDs.ravel()[flat_sorter].reshape(
self.links_at_node.shape)
cols_to_cut = int(bad_IDs.sum(axis=1).min())
if cols_to_cut > 0:
return big_ordered_array[:, :-cols_to_cut]
else:
return big_ordered_array
def downwind_links_at_node(self, values, bad_index=-1):
"""
Return an (nnodes, X) shape array of the IDs of the links that are
downwind of each node, according to *values* (array or field).
X is the maximum downwind links at any node. Nodes with fewer downwind
links than this have additional slots filled with *bad_index*. Links
are ordered anticlockwise from east.
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
bad_index : int
Index to place in array indicating no link.
Returns
-------
ndarray
Array of downwind link IDs
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.downwind_links_at_node('grad', bad_index=BAD_INDEX_VALUE)
array([[ 0, 3],
[ 1, 4],
[ 2, 5],
[ 6, -1],
[ 7, 10],
[ 8, 11],
[ 9, 12],
[13, -1],
[14, -1],
[15, -1],
[16, -1],
[-1, -1]])
LLCATS: LINF NINF CONN
"""
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
unordered_IDs = np.where(values_at_links > 0., self.links_at_node,
bad_index)
bad_IDs = unordered_IDs == bad_index
nnodes = self.number_of_nodes
flat_sorter = (np.argsort(bad_IDs, axis=1) +
self.links_at_node.shape[1] *
np.arange(nnodes).reshape((nnodes, 1)))
big_ordered_array = unordered_IDs.ravel()[flat_sorter].reshape(
self.links_at_node.shape)
cols_to_cut = int(bad_IDs.sum(axis=1).min())
if cols_to_cut > 0:
return big_ordered_array[:, :-cols_to_cut]
else:
return big_ordered_array
@property
def faces_at_cell(self):
"""Return array containing face IDs at each cell.
Creates array if it doesn't already exist.
Examples
--------
>>> from landlab import HexModelGrid, RasterModelGrid
>>> mg = RasterModelGrid((4, 5))
>>> mg.faces_at_cell
array([[ 4, 7, 3, 0],
[ 5, 8, 4, 1],
[ 6, 9, 5, 2],
[11, 14, 10, 7],
[12, 15, 11, 8],
[13, 16, 12, 9]])
>>> mg = HexModelGrid(3, 4)
>>> mg.faces_at_cell
array([[ 7, 11, 10, 6, 0, 1],
[ 8, 13, 12, 7, 2, 3],
[ 9, 15, 14, 8, 4, 5]])
LLCATS: FINF CINF CONN
"""
try:
return self._faces_at_cell
except AttributeError:
self._create_faces_at_cell()
return self._faces_at_cell
def number_of_faces_at_cell(self):
"""Number of faces attached to each cell.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg.number_of_faces_at_cell()
array([6, 6])
LLCATS: FINF CINF CONN
"""
num_faces_at_cell = np.zeros(self.number_of_cells, dtype=np.int)
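# Each link attached to one of a cell's nodes crosses exactly one face of
# that cell, so faces are tallied by visiting the tail and head node of
# every link.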
for ln in range(self.number_of_links):
cell = self.cell_at_node[self.node_at_link_tail[ln]]
if cell != BAD_INDEX_VALUE:
num_faces_at_cell[cell] += 1
cell = self.cell_at_node[self.node_at_link_head[ln]]
if cell != BAD_INDEX_VALUE:
num_faces_at_cell[cell] += 1
return num_faces_at_cell
def _sort_faces_at_cell_by_angle(self):
"""Sort the faces_at_cell array by angle.
Assumes links_at_node and link_dirs_at_node have been created.
"""
for cell in range(self.number_of_cells):
sorted_links = self.links_at_node[self.node_at_cell[cell], :]
sorted_faces = self._faces_at_cell[cell, :] = self.face_at_link[
sorted_links]
self._faces_at_cell[cell, :] = sorted_faces
def _create_faces_at_cell(self):
"""Construct faces_at_cell array.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg._create_faces_at_cell()
>>> hg._faces_at_cell
array([[ 5, 8, 7, 4, 0, 1],
[ 6, 10, 9, 5, 2, 3]])
"""
num_faces = self.number_of_faces_at_cell()
self._faces_at_cell = np.zeros((self.number_of_cells,
np.amax(num_faces)), dtype=int)
num_faces[:] = 0 # Zero out and count again, to use as index
for ln in range(self.number_of_links):
cell = self.cell_at_node[self.node_at_link_tail[ln]]
if cell != BAD_INDEX_VALUE:
self._faces_at_cell[cell, num_faces[cell]] = \
self.face_at_link[ln]
num_faces[cell] += 1
cell = self.cell_at_node[self.node_at_link_head[ln]]
if cell != BAD_INDEX_VALUE:
self._faces_at_cell[cell, num_faces[cell]] = \
self.face_at_link[ln]
num_faces[cell] += 1
self._sort_faces_at_cell_by_angle()
@property
@make_return_array_immutable
def patches_present_at_node(self):
"""
A boolean array, False where a patch has a closed node or is missing.
The array is the same shape as :func:`patches_at_node`, and is designed
to mask it.
Note that in cases where patches may have more than 3 nodes (e.g.,
rasters), a patch is considered still present as long as at least 3
open nodes are present.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((3, 3))
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.patches_at_node
array([[ 0, -1, -1, -1],
[ 1, 0, -1, -1],
[-1, 1, -1, -1],
[ 2, -1, -1, 0],
[ 3, 2, 0, 1],
[-1, 3, 1, -1],
[-1, -1, -1, 2],
[-1, -1, 2, 3],
[-1, -1, 3, -1]])
>>> mg.patches_present_at_node
array([[ True, False, False, False],
[ True, True, False, False],
[False, True, False, False],
[False, False, False, True],
[False, False, True, True],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> 1 in mg.patches_at_node * mg.patches_present_at_node
True
>>> 2 in mg.patches_at_node * mg.patches_present_at_node
False
LLCATS: PINF NINF
"""
try:
return self._patches_present_mask
except AttributeError:
self.patches_at_node
self._reset_patch_status()
return self._patches_present_mask
@property
@make_return_array_immutable
def patches_present_at_link(self):
"""
A boolean array, False where a patch has a closed node or is missing.
The array is the same shape as :func:`patches_at_link`, and is designed
to mask it.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((3, 3))
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.patches_at_link
array([[ 0, -1],
[ 1, -1],
[ 0, -1],
[ 0, 1],
[ 1, -1],
[ 0, 2],
[ 1, 3],
[ 2, -1],
[ 2, 3],
[ 3, -1],
[ 2, -1],
[ 3, -1]])
>>> mg.patches_present_at_link
array([[ True, False],
[ True, False],
[ True, False],
[ True, True],
[ True, False],
[ True, False],
[ True, False],
[False, False],
[False, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> 1 in mg.patches_at_link * mg.patches_present_at_link
True
>>> 2 in mg.patches_at_link * mg.patches_present_at_link
False
LLCATS: PINF LINF
"""
try:
return self._patches_present_link_mask
except AttributeError:
self.patches_at_node
self._reset_patch_status()
return self._patches_present_link_mask
@property
@make_return_array_immutable
def number_of_patches_present_at_node(self):
"""Return the number of patches at a node without a closed node.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((3, 3))
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.patches_present_at_node
array([[ True, False, False, False],
[ True, True, False, False],
[False, True, False, False],
[False, False, False, True],
[False, False, True, True],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> mg.number_of_patches_present_at_node
array([1, 2, 1, 1, 2, 1, 0, 0, 0])
LLCATS: PINF NINF BC
"""
try:
return self._number_of_patches_present_at_node
except AttributeError:
self.patches_at_node
self._reset_patch_status()
return self._number_of_patches_present_at_node
@property
@make_return_array_immutable
def number_of_patches_present_at_link(self):
"""Return the number of patches at a link without a closed node.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((3, 3))
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.patches_present_at_link
array([[ True, False],
[ True, False],
[ True, False],
[ True, True],
[ True, False],
[ True, False],
[ True, False],
[False, False],
[False, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> mg.number_of_patches_present_at_link
array([1, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0])
LLCATS: PINF LINF BC
"""
try:
return self._number_of_patches_present_at_link
except AttributeError:
self.patches_at_node
self._reset_patch_status()
return self._number_of_patches_present_at_link
def _reset_patch_status(self):
"""
Creates the arrays which store patches_present_at_node and the related
patch masks and counts.
Call whenever boundary conditions are updated on the grid.
"""
from landlab import RasterModelGrid, VoronoiDelaunayGrid
node_status_at_patch = self.status_at_node[self.nodes_at_patch]
if isinstance(self, RasterModelGrid):
max_nodes_at_patch = 4
elif isinstance(self, VoronoiDelaunayGrid):
max_nodes_at_patch = 3
else:
max_nodes_at_patch = (self.nodes_at_patch > -1).sum(axis=1)
any_node_at_patch_closed = (node_status_at_patch ==
CLOSED_BOUNDARY).sum(axis=1) > (
max_nodes_at_patch - 3)
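# A patch is treated as absent when more than (max_nodes_at_patch - 3) of
# its nodes are closed, i.e. when fewer than three open nodes remain.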
absent_patches = any_node_at_patch_closed[self.patches_at_node]
bad_patches = numpy.logical_or(absent_patches,
self.patches_at_node == -1)
self._patches_present_mask = numpy.logical_not(
bad_patches)
self._number_of_patches_present_at_node = numpy.sum(
self._patches_present_mask, axis=1)
absent_patches = any_node_at_patch_closed[self.patches_at_link]
bad_patches = numpy.logical_or(absent_patches,
self.patches_at_link == -1)
self._patches_present_link_mask = numpy.logical_not(
bad_patches)
self._number_of_patches_present_at_link = numpy.sum(
self._patches_present_link_mask, axis=1)
def calc_hillshade_at_node(self, alt=45., az=315., slp=None, asp=None,
unit='degrees', elevs='topographic__elevation'):
"""Get array of hillshade.
.. codeauthor:: Katy Barnhart <[email protected]>
Parameters
----------
alt : float
Sun altitude (from horizon) - defaults to 45 degrees
az : float
Sun azimuth (CW from north) - defaults to 315 degrees
slp : float
slope of cells at surface - optional
asp : float
aspect of cells at surface (from north) - optional (with slp)
unit : string
'degrees' (default) or 'radians' - only needed if slp and asp
are not provided
If slp and asp are both not specified, 'elevs' must be provided as
a grid field name (defaults to 'topographic__elevation') or an
nnodes-long array of elevation values. In this case, the method will
calculate local slopes and aspects internally as part of the hillshade
production.
Returns
-------
ndarray of float
Hillshade at each cell.
Notes
-----
Code taken from a GeospatialPython.com example from December 14th, 2014.
DEJH found what looked like minor sign problems, and adjusted to follow
the ArcGIS algorithm: http://help.arcgis.com/en/arcgisdesktop/10.0/
help/index.html#/How_Hillshade_works/009z000000z2000000/ .
Remember when plotting that bright areas have high values. *cmap='Greys'*
will give an apparently inverted color scheme; *cmap='gray'* has white
associated with the high values, so it is recommended for plotting.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((5, 5), 1.)
>>> z = mg.x_of_node * np.tan(60. * np.pi / 180.)
>>> mg.calc_hillshade_at_node(elevs=z, alt=30., az=210.)
array([ 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625,
0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625,
0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625, 0.625,
0.625])
LLCATS: NINF SURF
"""
if slp is not None and asp is not None:
if unit == 'degrees':
(alt, az, slp, asp) = (numpy.radians(alt), numpy.radians(az),
numpy.radians(slp), numpy.radians(asp))
elif unit == 'radians':
if alt > numpy.pi / 2. or az > 2. * numpy.pi:
six.print_(
'Assuming your solar properties are in degrees, '
'but your slopes and aspects are in radians...')
(alt, az) = (numpy.radians(alt), numpy.radians(az))
# ...because it would be super easy to specify radians,
# but leave the default params alone...
else:
raise TypeError("unit must be 'degrees' or 'radians'")
elif slp is None and asp is None:
if unit == 'degrees':
(alt, az) = (numpy.radians(alt), numpy.radians(az))
elif unit == 'radians':
pass
else:
raise TypeError("unit must be 'degrees' or 'radians'")
slp, slp_comps = self.calc_slope_at_node(
elevs, return_components=True)
asp = self.calc_aspect_at_node(slope_component_tuple=slp_comps,
unit='radians')
else:
raise TypeError('Either both slp and asp must be set, or neither!')
shaded = (
numpy.sin(alt) * numpy.cos(slp) +
numpy.cos(alt) * numpy.sin(slp) * numpy.cos(az - asp)
)
return shaded.clip(0.)
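# Sanity check of the hillshade expression above (a sketch, not a doctest):
# for a flat surface (slp = 0) it reduces to sin(alt), so with the default
# altitude of 45 degrees every node gets a hillshade of ~0.707 regardless of
# azimuth.
#
#     from landlab import RasterModelGrid
#     mg = RasterModelGrid((4, 4))
#     z = mg.zeros(at='node')                   # flat topography
#     hs = mg.calc_hillshade_at_node(elevs=z)   # ~0.707 everywhere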
@deprecated(use='calc_flux_div_at_node', version=1.0)
def calculate_flux_divergence_at_core_nodes(self, active_link_flux,
net_unit_flux=None):
r"""Get array of flux divergence for core nodes.
Given an array of fluxes along links, computes the net total flux
within each cell, divides by cell area, and stores the result in
net_unit_flux.
The function works by calling calculate_flux_divergence_at_nodes, then
slicing out only the values at core nodes. Therefore, it is slower
than calculate_flux_divergence_at_nodes, even though it returns a
shorter list of numbers.
The input active_link_flux should be flux of
something (e.g., mass, momentum, energy) per unit face width, positive
if flowing in the same direction as its link, and negative otherwise.
There should be one value per active link. Returns an array of net
total flux per unit area, one value per core node (creates this
array if it is not given as an argument).
By convention, divergence is positive for net outflow, and negative
for net inflow. That's why we *add* outgoing flux and *subtract*
incoming flux. This makes net_unit_flux have the same sign and
dimensions as a typical divergence term in a conservation equation.
In general, for a polygonal cell with *N* sides of lengths
Li and with surface area A, the net influx divided by cell
area would be:
.. math::
{Q_{net} \over A} = {1 \over A} \sum{q_i L_i}
For a square cell, which is what we have in RasterModelGrid,
the sum is over 4 sides of length dx, and :math:`A = dx^2`, so:
.. math::
{Q_{net} \over A} = {1 \over dx} \sum{q_i}
.. note::
The net flux is defined as positive outward, negative
inward. In a diffusion problem, for example, one would use:
.. math::
{du \over dt} = \text{source} - \text{fd}
where *fd* is "flux divergence".
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 5), 1.0)
>>> u = [0., 1., 2., 3., 0.,
... 1., 2., 3., 2., 3.,
... 0., 1., 2., 1., 2.,
... 0., 0., 2., 2., 0.]
>>> u = np.array(u)
>>> grad = rmg.calc_grad_at_link(u)[rmg.active_links]
>>> grad
array([ 1., 1., -1., 1., 1., -1., 1., -1., -1., -1., 1., 1., -1.,
1., -1., 0., 1.])
>>> flux = - grad # downhill flux proportional to gradient
>>> divflux = rmg.calculate_flux_divergence_at_core_nodes(flux)
>>> divflux
array([ 2., 4., -2., 0., 1., -4.])
If calculate_flux_divergence_at_core_nodes is called inside a loop, you can
improve speed slightly by creating an array outside the loop. For
example, do this once, before the loop:
>>> divflux = np.zeros(rmg.number_of_core_cells) # outside loop
Then do this inside the loop:
>>> divflux = rmg.calculate_flux_divergence_at_core_nodes(
... flux, divflux)
In this case, the function will not have to create the divflux array.
Note this method is untested with looped boundary conditions.
LLCATS: DEPR NINF GRAD
"""
if self._DEBUG_TRACK_METHODS:
six.print_('ModelGrid.calculate_flux_divergence_at_core_nodes')
assert (len(active_link_flux) == self.number_of_active_links), \
"incorrect length of active_link_flux array"
# If needed, create net_unit_flux array
if net_unit_flux is None:
net_unit_flux = numpy.zeros(self.number_of_core_nodes)
else:
net_unit_flux[:] = 0.
assert (len(net_unit_flux)) == self.number_of_core_nodes
node_net_unit_flux = self.calculate_flux_divergence_at_nodes(
active_link_flux)
node_at_core_cell = self.node_at_cell[self.core_cells]
net_unit_flux = node_net_unit_flux[node_at_core_cell]
return net_unit_flux
@deprecated(use='calc_flux_div_at_node', version=1.0)
def calculate_flux_divergence_at_nodes(self, active_link_flux, out=None):
"""Flux divergence at nodes.
Same as calculate_flux_divergence_at_active_cells, but works with and
returns a list of net unit fluxes that corresponds to all nodes, rather
than just active cells.
Note that we don't compute net unit fluxes at
boundary nodes (which don't have active cells associated with them, and
often don't have cells of any kind, because they are on the perimeter),
but simply return zeros for these entries. The advantage is that the
caller can work with node-based arrays instead of active-cell-based
arrays.
This method is untested with looped boundary conditions.
LLCATS: DEPR NINF GRAD
"""
return gfuncs.calculate_flux_divergence_at_nodes(self,
active_link_flux,
out=out)
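# Hedged sketch of how this node-based variant relates to the core-node
# variant above: it returns an nnodes-long array, with zeros at boundary
# nodes (the grid shape and flux values here are illustrative only).
#
#     from landlab import RasterModelGrid
#     rmg = RasterModelGrid((4, 5))
#     grad = rmg.calc_grad_at_link(rmg.node_x)[rmg.active_links]
#     df_nodes = rmg.calculate_flux_divergence_at_nodes(-grad)
#     df_nodes.shape == (rmg.number_of_nodes,)   # True; boundaries hold zeros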
@property
@make_return_array_immutable
def cell_area_at_node(self):
"""Cell areas in a nnodes-long array.
Zeros are entered at all perimeter nodes, which lack cells.
Returns
-------
ndarray
Cell areas as an n_nodes-long array.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 5), spacing=(3, 4))
>>> grid.status_at_node[7] = CLOSED_BOUNDARY
>>> grid.cell_area_at_node
array([ 0., 0., 0., 0., 0.,
0., 12., 12., 12., 0.,
0., 12., 12., 12., 0.,
0., 0., 0., 0., 0.])
LLCATS: CINF NINF CONN
"""
try:
return self._cell_area_at_node
except AttributeError:
return self._create_cell_areas_array_force_inactive()
@property
@deprecated(use='width_of_face', version=1.0)
def face_width(self):
"""
LLCATS: DEPR FINF MEAS
"""
return self.width_of_face
@property
@make_return_array_immutable
def width_of_face(self):
"""Width of grid faces.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, HexModelGrid
>>> mg = RasterModelGrid((3, 4), (1., 2.))
>>> mg.width_of_face
array([ 2., 2., 2., 1., 1., 1., 1.])
>>> mg = HexModelGrid(3, 3)
>>> np.allclose(mg.width_of_face, 0.57735027)
True
LLCATS: FINF MEAS
"""
try:
return self._face_width
except AttributeError:
return self._create_face_width()
def _create_face_at_link(self):
"""Set up face_at_link array.
Examples
--------
>>> from landlab import HexModelGrid, BAD_INDEX_VALUE
>>> hg = HexModelGrid(3, 3)
>>> face_at_link = hg.face_at_link.copy()
>>> face_at_link[face_at_link == BAD_INDEX_VALUE] = -1
>>> face_at_link # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, 0, 1, 2, 3, -1, 4, 5, 6, -1, 7, 8, 9, 10,
-1, -1, -1])
"""
self._face_at_link = numpy.full(self.number_of_links, BAD_INDEX_VALUE,
dtype=int)
face_id = 0
for link in range(self.number_of_links):
tc = self.cell_at_node[self.node_at_link_tail[link]]
hc = self.cell_at_node[self.node_at_link_head[link]]
if tc != BAD_INDEX_VALUE or hc != BAD_INDEX_VALUE:
self._face_at_link[link] = face_id
face_id += 1
return self._face_at_link
def _create_link_at_face(self):
"""Set up link_at_face array.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg.link_at_face
array([ 3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15])
"""
num_faces = len(self.width_of_face)
self._link_at_face = numpy.empty(num_faces, dtype=int)
face_id = 0
for link in range(self.number_of_links):
tc = self.cell_at_node[self.node_at_link_tail[link]]
hc = self.cell_at_node[self.node_at_link_head[link]]
if tc != BAD_INDEX_VALUE or hc != BAD_INDEX_VALUE:
self._link_at_face[face_id] = link
face_id += 1
return self._link_at_face
def _create_cell_areas_array_force_inactive(self):
"""Set up an array of cell areas that is n_nodes long.
Sets up an array of cell areas that is nnodes long. Nodes that have
cells receive the area of that cell; nodes without cells receive
zeros.
"""
_cell_area_at_node_zero = numpy.zeros(self.number_of_nodes,
dtype=float)
_cell_area_at_node_zero[self.node_at_cell] = self.area_of_cell
self._cell_area_at_node = _cell_area_at_node_zero
return self._cell_area_at_node
@deprecated(use='no replacement', version=1.0)
def get_active_link_connecting_node_pair(self, node1, node2):
"""Get the active link that connects a pair of nodes.
Returns the ID number of the active link that connects the given pair
of nodes, or BAD_INDEX_VALUE if not found.
This method is slow, and can only take single ints as *node1* and
*node2*. It should ideally be overridden for optimal functionality in
more specialized grid modules (e.g., raster).
Examples
--------
>>> import landlab as ll
>>> rmg = ll.RasterModelGrid((4, 5))
>>> rmg.get_active_link_connecting_node_pair(8, 3)
array([2])
LLCATS: DEPR LINF NINF CONN
"""
active_link = BAD_INDEX_VALUE
for alink in range(0, self.number_of_active_links):
link_connects_nodes = (
(self._activelink_fromnode[alink] == node1 and
self._activelink_tonode[alink] == node2) or
(self._activelink_tonode[alink] == node1 and
self._activelink_fromnode[alink] == node2))
if link_connects_nodes:
active_link = alink
break
return numpy.array([active_link])
@property
@make_return_array_immutable
def area_of_cell(self):
"""Get areas of grid cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5), spacing=(2, 3))
>>> grid.area_of_cell # doctest: +NORMALIZE_WHITESPACE
array([ 6., 6., 6.,
6., 6., 6.])
LLCATS: CINF MEAS
"""
return self._area_of_cell
@property
@deprecated(use='length_of_link', version=1.0)
def link_length(self):
"""
LLCATS: DEPR LINF MEAS
"""
return self.length_of_link
@property
def length_of_link(self):
"""Get lengths of links.
Returns
-------
ndarray
Lengths of all links, in ID order.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.length_of_link
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1.])
>>> len(grid.length_of_link) == grid.number_of_links
True
LLCATS: LINF MEAS
"""
if self._link_length is None:
return self._create_length_of_link()
else:
return self._link_length
@property
def _length_of_link_with_diagonals(self):
"""A dummy function, equivalent to `length_of_link` for the base class.
This method is required to maintain grid class generality in several
of the flow routing and stream power components. It is overridden in
RasterModelGrid only.
This method will be removed when LL's handling of diagonal links is
modernized.
"""
return self.length_of_link
def _create_length_of_link(self):
"""Get array of the lengths of all links.
Calculates, returns, and stores as a property of the grid the lengths
of all the links in the grid.
"""
if self._link_length is None:
self._link_length = self.empty(at='link', dtype=float)
diff_x = (self.node_x[self.node_at_link_tail] -
self.node_x[self.node_at_link_head])
diff_y = (self.node_y[self.node_at_link_tail] -
self.node_y[self.node_at_link_head])
numpy.sqrt(diff_x ** 2 + diff_y ** 2, out=self._link_length)
return self._link_length
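# Minimal sketch of the length calculation above (assumes a unit-spaced
# raster grid, so every link length is 1.0):
#
#     import numpy as np
#     from landlab import RasterModelGrid
#     grid = RasterModelGrid((3, 3))
#     np.allclose(grid.length_of_link,
#                 np.hypot(grid.node_x[grid.node_at_link_head] -
#                          grid.node_x[grid.node_at_link_tail],
#                          grid.node_y[grid.node_at_link_head] -
#                          grid.node_y[grid.node_at_link_tail]))   # True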
@deprecated(use='map_max_of_link_nodes_to_link', version=1.0)
def _assign_upslope_vals_to_active_links(self, u, v=None):
"""Assign upslope node value to link.
Assigns to each active link the value of *u* at whichever of its
neighbors has a higher value of *v*. If *v* is omitted, uses *u* for
both. The order of the link values is by link ID.
Parameters
----------
u : array-like
Node values to assign to links.
v : array-like, optional
Node values to test for upslope-ness.
Returns
-------
ndarray
Values at active links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> grid = RasterModelGrid((3, 3))
>>> u = np.arange(9.)
>>> grid._assign_upslope_vals_to_active_links(u)
array([ 4., 4., 5., 7.])
LLCATS: DEPR NINF LINF CONN
"""
if v is None:
v = numpy.array((0., ))
fv = numpy.zeros(self.number_of_active_links)
if len(v) < len(u):
for i in range(0, self.number_of_active_links):
fv[i] = max(u[self._activelink_fromnode[i]],
u[self._activelink_tonode[i]])
else:
for i in range(0, self.number_of_active_links):
if (v[self._activelink_fromnode[i]] >
v[self._activelink_tonode[i]]):
fv[i] = u[self._activelink_fromnode[i]]
else:
fv[i] = u[self._activelink_tonode[i]]
return fv
def _reset_link_status_list(self):
"""Create of reset a list of links statuses.
Creates or resets a list of link statuses. We do this by sweeping
through the given lists of from and to nodes, and checking the status
of these as given in the node_status list. A link is active if both its
nodes are core, or if one is core and the other is fixed value.
A link is inactive if either node is closed.
A link is fixed if either node is fixed gradient.
Note that by default, any link which has been previously set as fixed
will remain so, and if such a link is found to join a closed node to a
core node, the closed node will be converted to a fixed gradient node. If
you want to close a node which has a fixed link already connected to
it, first change the link status to inactive.
A further test is performed to ensure that the final maps of node and
link status are internally consistent.
"""
if self._DEBUG_TRACK_METHODS:
six.print_('ModelGrid._reset_link_status_list')
try:
already_fixed = self._status_at_link == FIXED_LINK
except AttributeError:
already_fixed = numpy.zeros(self.number_of_links, dtype=bool)
fromnode_status = self._node_status[self.node_at_link_tail]
tonode_status = self._node_status[self.node_at_link_head]
if not numpy.all((fromnode_status[already_fixed] ==
FIXED_GRADIENT_BOUNDARY) |
(tonode_status[already_fixed] ==
FIXED_GRADIENT_BOUNDARY)):
assert numpy.all(np.logical_not((fromnode_status[already_fixed] ==
CLOSED_BOUNDARY) &
(tonode_status[already_fixed] ==
CLOSED_BOUNDARY)))
fromnode_status[already_fixed] = numpy.where(
(fromnode_status[already_fixed] == CLOSED_BOUNDARY) &
(tonode_status[already_fixed] == CORE_NODE),
FIXED_GRADIENT_BOUNDARY,
fromnode_status[already_fixed])
tonode_status[already_fixed] = numpy.where(
(tonode_status[already_fixed] == CLOSED_BOUNDARY) &
(fromnode_status[already_fixed] == CORE_NODE),
FIXED_GRADIENT_BOUNDARY,
tonode_status[already_fixed])
warnings.warn("""
Remember, fixed_links are dominant over node statuses.
Your grid may have had an incompatibility between
fixed_links and closed nodes, which has been resolved by
converting the closed nodes to fixed gradient nodes. If
you were trying to deliberately close a node which had
once been set to fixed gradient, you need to open the
links before changing the node statuses. If you were
setting a node to fixed_value, you can ignore this
message.
""")
active_links = (((fromnode_status == CORE_NODE) & ~
(tonode_status == CLOSED_BOUNDARY)) |
((tonode_status == CORE_NODE) & ~
(fromnode_status == CLOSED_BOUNDARY)))
# ...this still includes things that will become fixed_link
fixed_links = ((((fromnode_status == FIXED_GRADIENT_BOUNDARY) &
(tonode_status == CORE_NODE)) |
((tonode_status == FIXED_GRADIENT_BOUNDARY) &
(fromnode_status == CORE_NODE))) |
already_fixed)
fixed_link_fixed_val = (((fromnode_status == FIXED_VALUE_BOUNDARY) |
(tonode_status == FIXED_VALUE_BOUNDARY)) &
already_fixed)
# these are the "special cases", where the user is probably trying to
# adjust an individual fixed_link back to fixed value. We'll allow it:
fixed_links[fixed_link_fixed_val] = False
try:
self._status_at_link.fill(INACTIVE_LINK)
except AttributeError:
self._status_at_link = numpy.empty(self.number_of_links, dtype=int)
self._status_at_link.fill(INACTIVE_LINK)
self._status_at_link[active_links] = ACTIVE_LINK
self._status_at_link[fixed_links] = FIXED_LINK
active_links = self._status_at_link == ACTIVE_LINK # now it's correct
(self._active_links, ) = numpy.where(active_links)
(self._fixed_links, ) = numpy.where(fixed_links)
self._active_links = as_id_array(self._active_links)
self._fixed_links = as_id_array(self._fixed_links)
self._activelink_fromnode = self.node_at_link_tail[active_links]
self._activelink_tonode = self.node_at_link_head[active_links]
# Set up active inlink and outlink matrices
self._setup_active_inlink_and_outlink_matrices()
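# Hedged illustration of the link-status rules implemented above: a link
# joining two core nodes (or a core and a fixed-value node) becomes
# ACTIVE_LINK, a link touching a closed node becomes INACTIVE_LINK, and a
# core/fixed-gradient pair becomes FIXED_LINK. The grid below is only a
# sketch.
#
#     from landlab import (RasterModelGrid, CLOSED_BOUNDARY,
#                          FIXED_GRADIENT_BOUNDARY)
#     mg = RasterModelGrid((4, 5))
#     mg.status_at_node[mg.nodes_at_left_edge] = CLOSED_BOUNDARY
#     mg.status_at_node[mg.nodes_at_right_edge] = FIXED_GRADIENT_BOUNDARY
#     mg.status_at_link   # mix of ACTIVE_LINK / INACTIVE_LINK / FIXED_LINK codes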
def _reset_lists_of_nodes_cells(self):
"""Create of reset lists of nodes and cells based on their status.
Creates or resets various lists of nodes and cells based on their
statuses. Call this function whenever you make changes to the
boundary conditions in the grid.
The updated attributes and arrays are:
* activecell_node
* corecell_node
* core_cells
* _boundary_nodes
Examples
--------
>>> import landlab
>>> grid = landlab.RasterModelGrid((4, 5))
>>> grid.status_at_node[7] = landlab.CLOSED_BOUNDARY
>>> grid.core_cells
array([0, 2, 3, 4, 5])
"""
(self._core_nodes, ) = numpy.where(self._node_status == CORE_NODE)
self._core_cells = self.cell_at_node[self._core_nodes]
self._boundary_nodes = as_id_array(
numpy.where(self._node_status != CORE_NODE)[0])
def _update_links_nodes_cells_to_new_BCs(self):
"""Update grid element connectivity, status.
This method updates all of the various lists and attributes governed
by node status (e.g., core nodes, active links, etc) when you change
node statuses. Call it if your method or driver makes changes to the
boundary conditions of nodes in the grid.
"""
self._reset_link_status_list()
self._reset_lists_of_nodes_cells()
self._create_active_faces()
try:
inactive_links = (self.status_at_link[self.links_at_node] ==
INACTIVE_LINK)
inactive_links[self.link_dirs_at_node == 0] = False
self._active_link_dirs_at_node[inactive_links] = 0
except AttributeError: # doesn't exist yet
pass
try:
if self.diagonal_list_created:
self.diagonal_list_created = False
except AttributeError:
pass
try:
if self.neighbor_list_created:
self.neighbor_list_created = False
except AttributeError:
pass
try:
self._fixed_grad_links_created
except AttributeError:
pass
else:
self._gradient_boundary_node_links()
self._create_fixed_gradient_boundary_node_anchor_node()
try:
self._patches_created
self._reset_patch_status()
except AttributeError:
pass
try:
self.bc_set_code += 1
except AttributeError:
self.bc_set_code = 0
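# Sketch of the intended call pattern for the updater above (hedged): a
# driver that modifies self._node_status directly should call it afterwards
# so that core nodes, active links, faces and patch masks stay in sync.
# Public setters such as set_nodata_nodes_to_closed() already do this
# internally. ("some_node_ids" below is a hypothetical array of node IDs.)
#
#     grid._node_status[some_node_ids] = CLOSED_BOUNDARY   # direct edit
#     grid._update_links_nodes_cells_to_new_BCs()          # re-derive statuses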
@deprecated(use='set_nodata_nodes_to_closed', version='0.2')
def set_nodata_nodes_to_inactive(self, node_data, nodata_value):
"""Make no-data nodes inactive.
Set the status to CLOSED_BOUNDARY for all nodes whose value
of node_data is equal to the nodata_value.
Parameters
----------
node_data : ndarray
Data values.
nodata_value : float
Value that indicates an invalid value.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4), 1.0)
>>> mg.status_at_node
array([1, 1, 1, 1,
1, 0, 0, 1,
1, 1, 1, 1], dtype=int8)
>>> h = np.array([-9999, -9999, -9999, -9999,
... -9999, -9999, 12345., 0.,
... -9999, 0., 0., 0.])
>>> mg.set_nodata_nodes_to_inactive(h, -9999)
>>> mg.status_at_node
array([4, 4, 4, 4,
4, 4, 0, 1,
4, 1, 1, 1], dtype=int8)
LLCATS: DEPR NINF BC
"""
self.set_nodata_nodes_to_closed(node_data, nodata_value)
def set_nodata_nodes_to_closed(self, node_data, nodata_value):
"""Make no-data nodes closed boundaries.
Sets node status to :any:`CLOSED_BOUNDARY` for all nodes whose value
of *node_data* is equal to the *nodata_value*.
Any links connected to :any:`CLOSED_BOUNDARY` nodes are automatically
set to :any:`INACTIVE_LINK` boundary.
Parameters
----------
node_data : ndarray
Data values.
nodata_value : float
Value that indicates an invalid value.
Examples
--------
The following example uses this grid::
*--I--->o------>o------>o
^ ^ ^ ^
I I | |
| | | |
*--I--->*--I--->o------>o
^ ^ ^ ^
I I I I
| | | |
*--I--->*--I--->*--I--->*
.. note::
Links set to :any:`ACTIVE_LINK` are not shown in this diagram.
``*`` indicates the nodes that are set to :any:`CLOSED_BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
>>> import numpy as np
>>> import landlab as ll
>>> mg = ll.RasterModelGrid((3, 4), 1.0)
>>> mg.status_at_node
array([1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1], dtype=int8)
>>> h = np.array([-9999, -9999, -9999, -9999, -9999, -9999, 12345.,
... 0., -9999, 0., 0., 0.])
>>> mg.set_nodata_nodes_to_closed(h, -9999)
>>> mg.status_at_node
array([4, 4, 4, 4, 4, 4, 0, 1, 4, 1, 1, 1], dtype=int8)
LLCATS: BC NINF
"""
# Find locations where value equals the NODATA code and set these nodes
# as inactive boundaries.
nodata_locations = numpy.nonzero(node_data == nodata_value)
self._node_status[nodata_locations] = CLOSED_BOUNDARY
# Recreate the list of active cell IDs
self._update_links_nodes_cells_to_new_BCs()
def set_nodata_nodes_to_fixed_gradient(self, node_data, nodata_value):
"""Make no-data nodes fixed gradient boundaries.
Set node status to :any:`FIXED_GRADIENT_BOUNDARY` for all nodes
whose value of *node_data* is equal to *nodata_value*.
Any links between :any:`FIXED_GRADIENT_BOUNDARY` nodes and
:any:`CORE_NODES` are automatically set to :any:`FIXED_LINK` boundary
status.
Parameters
----------
node_data : ndarray
Data values.
nodata_value : float
Value that indicates an invalid value.
Examples
--------
The following examples use this grid::
*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I I I X X X X X I
| | | | | | | | |
*--I--->*--I--->*--X--->o o o o o--X--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I I I | | | | | I
| | | | | | | | |
*--I--->*--I--->*--X--->o o o o o--X--->*
^ ^ ^ ^ ^ ^ ^ ^ ^
I I I X X X X X I
| | | | | | | | |
*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*--I--->*
.. note::
Links set to :any:`ACTIVE_LINK` are not shown in this diagram.
``X`` indicates the links that are set to :any:`FIXED_LINK`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``*`` indicates the nodes that are set to
:any:`FIXED_GRADIENT_BOUNDARY`
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 9))
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int8)
>>> z = rmg.zeros(at='node')
>>> z = np.array([
... -99., -99., -99., -99., -99., -99., -99., -99., -99.,
... -99., -99., -99., 0., 0., 0., 0., 0., -99.,
... -99., -99., -99., 0., 0., 0., 0., 0., -99.,
... -99., -99., -99., -99., -99., -99., -99., -99., -99.])
>>> rmg.set_nodata_nodes_to_fixed_gradient(z, -99)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 0, 0, 0, 0, 0, 2,
2, 2, 2, 0, 0, 0, 0, 0, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2], dtype=int8)
>>> rmg.status_at_link # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 4,
4, 4, 2, 0, 0, 0, 0, 2, 4, 4, 4, 0, 0, 0, 0, 0, 4,
4, 4, 2, 0, 0, 0, 0, 2, 4, 4, 4, 2, 2, 2, 2, 2, 4,
4, 4, 4, 4, 4, 4, 4, 4])
LLCATS: BC NINF
"""
# Find locations where value equals the NODATA code and set these nodes
# as inactive boundaries.
nodata_locations = numpy.nonzero(node_data == nodata_value)
self._node_status[nodata_locations] = FIXED_GRADIENT_BOUNDARY
# Recreate the list of active cell IDs
self._update_links_nodes_cells_to_new_BCs()
@deprecated(use='map_max_of_link_nodes_to_link', version=1.0)
def max_of_link_end_node_values(self, node_data):
"""Maximum value at the end of links.
For each active link, finds and returns the maximum value of node_data
at either of the two ends. Use this, for example, if you want to find
the maximum value of water depth at linked pairs of nodes (by passing
in an array of water depth values at nodes).
Parameters
----------
node_data : ndarray
Values at grid nodes.
Returns
-------
ndarray :
Maximum values whose length is the number of active links.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), spacing=(1., 1.))
>>> h = np.array([ 2., 2., 8., 0.,
... 8., 0., 3., 0.,
... 5., 6., 8., 3.])
>>> grid.max_of_link_end_node_values(h)
array([ 2., 8., 8., 3., 3., 6., 8.])
Note that this method is *deprecated*. The alternative is to use
``map_max_of_link_nodes_to_link``.
>>> vals = grid.map_max_of_link_nodes_to_link(h)
>>> vals[grid.active_links]
array([ 2., 8., 8., 3., 3., 6., 8.])
LLCATS: DEPR LINF NINF CONN
"""
return numpy.maximum(node_data[self._activelink_fromnode],
node_data[self._activelink_tonode])
def _calc_numbers_of_node_neighbors(self):
"""Number of neighbor nodes.
Calculates the number of neighboring nodes for each node, and returns
the result as a 1D numpy array. Used to find the maximum number of
neighbors, so that inlink and outlink matrices can be dimensioned
accordingly. Assumes that self.number_of_nodes, self.node_at_link_tail,
and self.node_at_link_head have already been set up.
Algorithm works by simply looping through all links; for each, the
endpoints are neighbors of one another, so we increment the number of
neighbors for both the endpoint nodes.
"""
num_nbrs = numpy.zeros(self.number_of_nodes, dtype=int)
for link in range(self.number_of_links):
num_nbrs[self.node_at_link_tail[link]] += 1
num_nbrs[self.node_at_link_head[link]] += 1
return num_nbrs
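# Sketch of the neighbor count above (hedged: for a small unit raster an
# interior node has 4 link neighbors, an edge node 3, a corner 2):
#
#     from landlab import RasterModelGrid
#     grid = RasterModelGrid((3, 3))
#     grid._calc_numbers_of_node_neighbors()   # e.g. [2, 3, 2, 3, 4, 3, 2, 3, 2]
#
# The helper below simply caches the faces crossed by the currently active
# links, for reuse after boundary-condition changes.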
def _create_active_faces(self):
self._active_faces = self.face_at_link[self.active_links]
return self._active_faces
@deprecated(use='no replacement', version=1.0)
def _setup_inlink_and_outlink_matrices(self):
"""Create data structured for number of inlinks and outlinks.
Creates data structures to record the numbers of inlinks and outlinks
for each node. An inlink of a node is simply a link that has the
node as its "to" node, and an outlink is a link that has the node
as its "from".
We store the inlinks in an NM-row by num_nodes-column matrix called
_node_inlink_matrix. NM is the maximum number of neighbors for any node.
We also keep track of the total number of inlinks and outlinks at each
node in the num_inlinks and num_outlinks arrays.
The inlink and outlink matrices are useful in numerical calculations.
Each row of each matrix contains one inlink or outlink per node. So, if
you have a corresponding "flux" matrix, you can map incoming or
outgoing fluxes onto the appropriate nodes. More information on this is
in the various calculate_flux_divergence... functions.
What happens if a given node does not have two inlinks or outlinks? We
simply put the default value -1 in this case. This allows us to use a
cute little trick when computing inflows and outflows. We make our
"flux" array one element longer than the number of links, with the last
element containing the value 0. Thus, any time we add an influx from
link number -1, Python takes the value of the last element in the
array, which is zero. By doing it this way, we maintain the efficiency
that comes with the use of numpy. Again, more info can be found in the
description of the flux divergence functions.
"""
# Find the maximum number of neighbors for any node
num_nbrs = self._calc_numbers_of_node_neighbors()
self.max_num_nbrs = numpy.amax(num_nbrs)
# Create active in-link and out-link matrices.
self._node_inlink_matrix = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
self._node_outlink_matrix = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
# Set up the inlink arrays
tonodes = self.node_at_link_head
self._node_numinlink = numpy.bincount(tonodes,
minlength=self.number_of_nodes)
counts = count_repeated_values(self.node_at_link_head)
for (count, (tonodes, link_ids)) in enumerate(counts):
self._node_inlink_matrix[count][tonodes] = link_ids
# Set up the outlink arrays
fromnodes = self.node_at_link_tail
self._node_numoutlink = numpy.bincount(fromnodes,
minlength=self.number_of_nodes)
counts = count_repeated_values(self.node_at_link_tail)
for (count, (fromnodes, link_ids)) in enumerate(counts):
self._node_outlink_matrix[count][fromnodes] = link_ids
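# Hedged sketch of the -1 padding trick described above: fluxes are stored
# in an array one element longer than the number of links, with a trailing
# zero, so that indexing with a padded link ID of -1 silently contributes
# nothing. ("some_link_fluxes" is a hypothetical per-link flux array.)
#
#     import numpy as np
#     flux = np.zeros(grid.number_of_links + 1)
#     flux[:-1] = some_link_fluxes                           # last slot stays 0
#     inflow = flux[grid._node_inlink_matrix].sum(axis=0)    # -1 entries add zero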
@deprecated(use='no replacement', version=1.0)
def _setup_active_inlink_and_outlink_matrices(self):
"""Create data structures for number of active inlinks and outlinks.
Creates data structures to record the numbers of active inlinks and
active outlinks for each node. These data structures are equivalent to
the "regular" inlink and outlink matrices, except that it uses the IDs
of active links (only).
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 2)
>>> hg._node_numactiveinlink
array([0, 0, 0, 3, 1, 1, 1])
>>> hg._node_active_inlink_matrix2
array([[-1, -1, -1, 2, 6, 8, 9],
[-1, -1, -1, 3, -1, -1, -1],
[-1, -1, -1, 5, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]])
>>> hg._node_numactiveoutlink
array([1, 1, 1, 3, 0, 0, 0])
>>> hg._node_active_outlink_matrix2
array([[ 2, 3, 5, 6, -1, -1, -1],
[-1, -1, -1, 8, -1, -1, -1],
[-1, -1, -1, 9, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]])
"""
# Create active in-link and out-link matrices.
self._node_active_inlink_matrix = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
self._node_active_outlink_matrix = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
# Set up the inlink arrays
tonodes = self._activelink_tonode
self._node_numactiveinlink = as_id_array(numpy.bincount(
tonodes, minlength=self.number_of_nodes))
counts = count_repeated_values(self._activelink_tonode)
for (count, (tonodes, active_link_ids)) in enumerate(counts):
self._node_active_inlink_matrix[count][tonodes] = active_link_ids
# Set up the outlink arrays
fromnodes = self._activelink_fromnode
self._node_numactiveoutlink = as_id_array(numpy.bincount(
fromnodes, minlength=self.number_of_nodes))
counts = count_repeated_values(self._activelink_fromnode)
for (count, (fromnodes, active_link_ids)) in enumerate(counts):
self._node_active_outlink_matrix[count][fromnodes] = active_link_ids
# THE FOLLOWING IS MEANT TO REPLACE THE ABOVE CODE, USING LINK IDS
# FOR ACTIVE LINKS (ONLY), INSTEAD OF "ACTIVE LINK IDS". THE POINT IS
# TO HAVE JUST ONE ID/NUMBERING SYSTEM FOR LINKS, RATHER THAN A
# SEPARATE NUMBERING SYSTEM FOR ACTIVE LINKS
# GT JUNE 2015
# TODO: CLEAN THIS UP
# Create AN ALTERNATIVE VERSION OF active in-link and out-link
# matrices, WHICH WILL EVENTUALLY REPLACE THE ONE ABOVE (AND BE
# RENAMED TO GET RID OF THE "2")
# TODO: MAKE THIS CHANGE ONCE CODE THAT USES IT HAS BEEN PREPPED
self._node_active_inlink_matrix2 = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
self._node_active_outlink_matrix2 = - numpy.ones(
(self.max_num_nbrs, self.number_of_nodes), dtype=numpy.int)
# Set up the inlink arrays
tonodes = self.node_at_link_head[self.active_links]
self._node_numactiveinlink = as_id_array(numpy.bincount(
tonodes, minlength=self.number_of_nodes))
# OK, HERE WE HAVE TO MAKE A CHANGE, BECAUSE THE INDICES RETURNED BY
# count_repeated_values ARE "ACTIVE LINK INDICES", WHICH WE ARE NO
# LONGER USING. HAVE TO TURN THESE BACK INTO LINK IDS. I THINK WE CAN
# DO THIS BY CHANGING active_link_ids TO
# self.active_links[active_link_ids] BUT HAVEN'T MADE THIS CHANGE YET.
# NEED TO WORK THROUGH EXAMPLE 3,2 HMG
counts = count_repeated_values(
self.node_at_link_head[self.active_links])
for (count, (tonodes, active_link_ids)) in enumerate(counts):
self._node_active_inlink_matrix2[count][
tonodes] = self.active_links[active_link_ids]
# Set up the outlink arrays
fromnodes = self.node_at_link_tail[self.active_links]
self._node_numactiveoutlink = as_id_array(numpy.bincount(
fromnodes, minlength=self.number_of_nodes))
counts = count_repeated_values(self._activelink_fromnode)
for (count, (fromnodes, active_link_ids)) in enumerate(counts):
self._node_active_outlink_matrix2[count][
fromnodes] = self.active_links[active_link_ids]
def _create_link_unit_vectors(self):
"""Make arrays to store the unit vectors associated with each link.
Creates self.link_unit_vec_x and self.link_unit_vec_y. These contain,
for each link, the x and y components of the link's unit vector (that
is, the link's x and y dimensions if it were shrunk to unit length but
retained its orientation). The length of these arrays is the number of
links plus one. The last entry in each array is set to zero, and is
used to handle references to "link -1" (meaning, a non-existent link,
whose unit vector is (0,0)).
Also builds arrays to store the unit-vector component sums for each
node: node_unit_vector_sum_x and node_unit_vector_sum_y. These are
designed to be used when mapping link vector values to nodes (one takes
the average of the x- and y-components of all connected links).
Notes
-----
Creates the following:
* ``self.link_unit_vec_x``, ``self.link_unit_vec_y`` : ndarray
x and y components of unit vectors at each link (extra 0
entries @ end)
* ``self.node_vector_sum_x``, ``self.node_vector_sum_y`` : ndarray
Sums of x & y unit vector components for each node. Sum is over all
links connected to a given node.
Examples
--------
The example below is a seven-node hexagonal grid, with six nodes around
the perimeter and one node (#3) in the interior. There are four
horizontal links with unit vector (1,0), and 8 diagonal links with
unit vector (+/-0.5, +/-sqrt(3)/2) (note: sqrt(3)/2 ~ 0.866).
.. note::
This example assumes that the triangulation places links in a
certain order. Because the order is arbitrary, this might break on
different platforms. If that happens, the example needs to be
made generic somehow ...
>>> import landlab as ll
>>> hmg = ll.HexModelGrid(3, 2, 2.0)
>>> hmg.link_unit_vec_x # doctest: +NORMALIZE_WHITESPACE
array([ 1. , -0.5, 0.5, -0.5, 0.5, 1. , 1. , 0.5, -0.5, 0.5, -0.5,
1. , 0. ])
>>> hmg.link_unit_vec_y
array([ 0. , 0.8660254, 0.8660254, 0.8660254, 0.8660254,
0. , 0. , 0.8660254, 0.8660254, 0.8660254,
0.8660254, 0. , 0. ])
>>> hmg.node_unit_vector_sum_x
array([ 2., 2., 2., 4., 2., 2., 2.])
>>> hmg.node_unit_vector_sum_y
array([ 1.73205081, 1.73205081, 1.73205081, 3.46410162, 1.73205081,
1.73205081, 1.73205081])
"""
# Create the arrays for unit vectors for each link. These each get an
# additional array element at the end with the value zero. This allows
# any references to "link ID -1" in the _node_inlink_matrix and
# _node_outlink_matrix to refer to the zero value in this extra element,
# so that when we're summing up link unit vectors, or multiplying by a
# nonexistent unit vector, we end up just treating these as zero.
self._link_unit_vec_x = numpy.zeros(self.number_of_links + 1)
self._link_unit_vec_y = numpy.zeros(self.number_of_links + 1)
# Calculate the unit vectors using triangle similarity and the
# Pythagorean Theorem.
dx = self.node_x[self.node_at_link_head] - \
self.node_x[self.node_at_link_tail]
dy = self.node_y[self.node_at_link_head] - \
self.node_y[self.node_at_link_tail]
self._link_unit_vec_x[:self.number_of_links] = dx / self.length_of_link
self._link_unit_vec_y[:self.number_of_links] = dy / self.length_of_link
# While we're at it, calculate the unit vector sums for each node.
# These will be useful in averaging link-based vectors at the nodes.
self._node_unit_vector_sum_x = numpy.zeros(self.number_of_nodes)
self._node_unit_vector_sum_y = numpy.zeros(self.number_of_nodes)
max_num_inlinks_per_node = numpy.size(self._node_inlink_matrix, 0)
for i in range(max_num_inlinks_per_node):
self._node_unit_vector_sum_x += abs(
self._link_unit_vec_x[self._node_inlink_matrix[i, :]])
self._node_unit_vector_sum_y += abs(
self._link_unit_vec_y[self._node_inlink_matrix[i, :]])
self._node_unit_vector_sum_x += abs(
self._link_unit_vec_x[self._node_outlink_matrix[i, :]])
self._node_unit_vector_sum_y += abs(
self._link_unit_vec_y[self._node_outlink_matrix[i, :]])
@property
def unit_vector_xcomponent_at_link(self):
"""Get array of x-component of unit vector for links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> len(grid.unit_vector_xcomponent_at_link) == grid.number_of_links + 1
True
>>> grid.unit_vector_xcomponent_at_link # doctest: +NORMALIZE_WHITESPACE
array([ 1., 1., 0., 0., 0.,
1., 1., 0., 0., 0., 1., 1., 0.])
LLCATS: LINF MEAS
"""
if self._link_unit_vec_x is None:
self._create_link_unit_vectors()
return self._link_unit_vec_x
@property
@deprecated(use='unit_vector_xcomponent_at_link', version='0.5')
def link_unit_vec_x(self):
"""
LLCATS: DEPR LINF MEAS
"""
return self.unit_vector_xcomponent_at_link
@property
def unit_vector_ycomponent_at_link(self):
"""Get array of y-component of unit vector for links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> len(grid.unit_vector_ycomponent_at_link) == grid.number_of_links + 1
True
>>> grid.unit_vector_ycomponent_at_link # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 1., 1., 1.,
0., 0., 1., 1., 1., 0., 0., 0.])
LLCATS: LINF MEAS
"""
if self._link_unit_vec_y is None:
self._create_link_unit_vectors()
return self._link_unit_vec_y
@property
@deprecated(use='unit_vector_xcomponent_at_link', version='0.5')
def link_unit_vec_y(self):
"""
LLCATS: DEPR LINF MEAS
"""
return self.unit_vector_ycomponent_at_link
@property
def unit_vector_sum_xcomponent_at_node(self):
"""Get array of x-component of unit vector sums at each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> len(grid.unit_vector_sum_xcomponent_at_node) == grid.number_of_nodes
True
>>> grid.unit_vector_sum_xcomponent_at_node
array([ 1., 2., 1., 1., 2., 1., 1., 2., 1.])
LLCATS: NINF MEAS
"""
if self._node_unit_vector_sum_x is None:
self._create_link_unit_vectors()
return self._node_unit_vector_sum_x
@property
@deprecated(use='unit_vector_sum_xcomponent_at_node', version='0.5')
def node_unit_vector_sum_x(self):
"""
LLCATS: DEPR NINF MEAS
"""
return self.unit_vector_sum_xcomponent_at_node
@property
def unit_vector_sum_ycomponent_at_node(self):
"""Get array of y-component of unit vector sums at each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 3))
>>> len(grid.unit_vector_sum_ycomponent_at_node) == grid.number_of_nodes
True
>>> grid.unit_vector_sum_ycomponent_at_node
array([ 1., 1., 1., 2., 2., 2., 1., 1., 1.])
LLCATS: NINF MEAS
"""
if self._node_unit_vector_sum_y is None:
self._create_link_unit_vectors()
return self._node_unit_vector_sum_y
@property
@deprecated(use='unit_vector_sum_ycomponent_at_node', version='0.5')
def node_unit_vector_sum_y(self):
"""
LLCATS: DEPR NINF MEAS
"""
return self.unit_vector_sum_ycomponent_at_node
def map_link_vector_to_nodes(self, q):
r"""Map data defined on links to nodes.
Given a variable defined on links, breaks it into x and y components
and assigns values to nodes by averaging each node's attached links.
Parameters
----------
q : ndarray of floats (1D, length = number of links in grid)
Variable defined on links
Returns
-------
ndarray, ndarray
x and y components of variable mapped to nodes (1D,
length = number of nodes)
See Also
--------
_create_link_unit_vectors : sets up unit vectors at links and unit-vector
sums at nodes
Notes
-----
THIS ALGORITHM IS NOT CORRECT AND NEEDS TO BE CHANGED!
The concept here is that q contains a vector variable that is defined
at each link. The magnitude is given by the value of q, and the
direction is given by the orientation of the link, as described by
its unit vector.
To map the link-vector values to the nodes, we break the values into
x- and y-components according to each link's unit vector. The
x-component of q at a node is a weighted sum of the x-components of the
links that are attached to that node. A good way to appreciate this
is by example. Consider a 3x4 raster grid::
8--14---9--15--10--16--11
| | | |
4 5 6 7
| | | |
4--11---5---12--6---13--7
| | | |
0 1 2 3
| | | |
0---8---1---9---2--10---3
Imagine that for each node, we were to add up the unit vector
components for each connected link; in other words, add up all the x
components of the unit vectors associated with each link, and add up
all the y components. Here's what that would look like for the above
grid ("vsx" and "vsy" stand for "vector sum x" and "vector sum y"):
* Corner nodes (0, 3, 8, 11): vsx = 1, vsy = 1
* Bottom and top nodes (1-2, 9-10): vsx = 2, vsy = 1
* Left and right nodes (4, 7): vsx = 1, vsy = 2
* All others: vsx = 2, vsy = 2
The process of creating unit-vector sums at nodes is handled by
ModelGrid._create_link_unit_vectors() (and, for raster grids, by the
overriding method RasterModelGrid._create_link_unit_vectors()). The node
unit-vector sums are then stored in self.node_unit_vector_sum_x and
self.node_unit_vector_sum_y.
How would you use this? Suppose you have a vector variable q defined at
links. What's the average at the nodes? We'll define the average as
follows. The terminology here is: :math:`q = (u,v)` represents the
vector quantity defined at links, :math:`Q = (U,V)` represents its
definition at nodes, :math:`(m,n)` represents the unit vector
components at a link, and :math:`(S_x,S_y)` represents the unit-vector
sum at a given node.
.. math::
U_i = \sum_{j=1}^{L_i} q_j m_j / S_{xi}
V_i = \sum_{j=1}^{L_i} q_j n_j / S_{yi}
Suppose that the vector q is uniform and equal to one.
Then, at node 0 in the above grid, this works out to::
U_0 = (q_0 m_0) / 1 + (q_8 m_8) / 1 = (1*0)/1 + (1*1)/1 = 1
V_0 = (q_0 n_0) / 1 + (q_8 n_8) / 1 = (1*1)/1 + (1*0)/1 = 1
At node 1, in the bottom row but not a corner, we add up the values
of **q** associated with THREE links. The x-vector sum of these links
is 2 because there are two horizontal links, each with an x- unit
vector value of unity. The y-vector sum is 1 because only one of the
three (link #1) has a non-zero y component (equal to one). Here is
how the numbers work out::
U_1 = (q_1 m_1) / 2 + (q_8 m_8) / 2 + (q_9 m_9) / 2
= (1*0)/2 + (1*1)/2 + (1*1)/2 = 1
V_1 = (q_1 n_1) / 1 + (q_8 n_8) / 1 + (q_9 n_9) / 1
= (1*1)/1 + (1*0)/1 + (1*0)/1 = 1
At node 5, in the interior, there are four connected links (two
in-links and two out-links; two horizontal and two vertical). So, we
add up the q values associated with all four::
U_5 = (q_1 m_1) / 2 + (q_5 m_5) / 2 + (q_11 m_11) / 2 + (q_12 m_12) / 2
= (1*0)/2 + (1*0)/2 + (1*1)/2 + (1*1)/2 = 1
V_5 = (q_1 n_1) / 2 + (q_5 n_5) / 2 + (q_11 n_11) / 2 + (q_12 n_12) / 2
= (1*1)/2 + (1*1)/2 + (1*0)/2 + (1*0)/2 = 1
To do this calculation efficiently, we use the following algorithm::
FOR each row in _node_inlink_matrix (representing one inlink @ each
node)
Multiply the link's q value by its unit x component ...
... divide by node's unit vector sum in x ...
... and add it to the node's total q_x
Multiply the link's q value by its unit y component ...
... divide by node's unit vector sum in y ...
... and add it to the node's total q_y
Examples
--------
**Example 1**
q[:] = 1. Vector magnitude is :math:`\sqrt{2}`, direction is
:math:`(1,1)`.
>>> import numpy as np
>>> import landlab as ll
>>> rmg = ll.RasterModelGrid((3, 4), spacing=(2., 2.))
>>> rmg.node_unit_vector_sum_x
array([ 1., 2., 2., 1., 1., 2., 2., 1., 1., 2., 2., 1.])
>>> rmg.node_unit_vector_sum_y
array([ 1., 1., 1., 1., 2., 2., 2., 2., 1., 1., 1., 1.])
>>> q = np.ones(rmg.number_of_links)
>>> nvx, nvy = rmg.map_link_vector_to_nodes(q)
>>> nvx
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> nvy
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
**Example 2**
Vector magnitude is 5, angle is 30 degrees from horizontal,
forming a 3-4-5 triangle.
>>> q = np.array([4., 4., 4., 3., 3., 3., 3.,
... 4., 4., 4., 3., 3., 3., 3.,
... 4., 4., 4])
>>> nvx, nvy = rmg.map_link_vector_to_nodes(q)
>>> nvx
array([ 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4., 4.])
>>> nvy
array([ 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3., 3.])
.. todo::
Fix and finish example 3 below.
Example 3: Hexagonal grid with vector as above. Here, q is
pre-calculated to have the right values to represent a uniform
vector with magnitude 5 and orientation 30 degrees counter-clockwise
from horizontal.
LLCATS: NINF LINF CONN MAP
"""
# Create the arrays to hold the node-based values of the x and y
# components of the vector (q)
node_vec_x = numpy.zeros(self.number_of_nodes)
node_vec_y = numpy.zeros(self.number_of_nodes)
# Break the link-based vector input variable, q, into x- and
# y-components.
# Notes:
# 1) We make the arrays 1 element longer than the number of links,
# so that references to -1 in the node-link matrices will refer
# to the last element of these two arrays, which will contain
# zeros. (Same trick as in the flux divergence functions)
# 2) This requires memory allocation. Because this function might be
# called repeatedly, it would be good to find a way to
# pre-allocate to improve speed.
qx = numpy.zeros(self.number_of_links + 1)
qy = numpy.zeros(self.number_of_links + 1)
qx[:self.number_of_links] = q * \
self.link_unit_vec_x[:self.number_of_links]
qy[:self.number_of_links] = q * \
self.link_unit_vec_y[:self.number_of_links]
# Loop over each row in the _node_inlink_matrix and _node_outlink_matrix.
# This isn't a big loop! In a raster grid, these have only two rows
# each; in an unstructured grid, it depends on the grid geometry;
# for a hex grid, there are up to 6 rows.
n_matrix_rows = numpy.size(self._node_inlink_matrix, 0)
for i in range(n_matrix_rows):
node_vec_x += qx[self._node_inlink_matrix[i, :]]
node_vec_x += qx[self._node_outlink_matrix[i, :]]
node_vec_y += qy[self._node_inlink_matrix[i, :]]
node_vec_y += qy[self._node_outlink_matrix[i, :]]
node_vec_x /= self.node_unit_vector_sum_x
node_vec_y /= self.node_unit_vector_sum_y
return node_vec_x, node_vec_y
@deprecated(use='plot.imshow_grid', version=1.0)
def display_grid(self, draw_voronoi=False):
"""Display the grid.
LLCATS: DEPR GINF
"""
import matplotlib.pyplot as plt
# Plot nodes, colored by boundary vs interior
plt.plot(self.node_x[self.core_nodes],
self.node_y[self.core_nodes], 'go')
plt.plot(self.node_x[self.boundary_nodes],
self.node_y[self.boundary_nodes], 'ro')
# Draw links
for i in range(self.number_of_links):
plt.plot([self.node_x[self.node_at_link_tail[i]],
self.node_x[self.node_at_link_head[i]]],
[self.node_y[self.node_at_link_tail[i]],
self.node_y[self.node_at_link_head[i]]], 'k-')
# Draw active links
for link in self._active_links:
plt.plot([self.node_x[self.node_at_link_tail[link]],
self.node_x[self.node_at_link_head[link]]],
[self.node_y[self.node_at_link_tail[link]],
self.node_y[self.node_at_link_head[link]]], 'g-')
# If caller asked for a voronoi diagram, draw that too
if draw_voronoi:
from scipy.spatial import Voronoi, voronoi_plot_2d
pts = numpy.zeros((self.number_of_nodes, 2))
pts[:, 0] = self.node_x
pts[:, 1] = self.node_y
vor = Voronoi(pts)
voronoi_plot_2d(vor)
plt.show()
@deprecated(use='node_is_boundary', version=1.0)
def is_boundary(self, ids, boundary_flag=None):
"""
LLCATS: DEPR NINF BC
"""
return self.node_is_boundary(ids, boundary_flag=boundary_flag)
def node_is_boundary(self, ids, boundary_flag=None):
"""Check if nodes are boundary nodes.
Check if nodes at given *ids* are boundary nodes. Use the
*boundary_flag* to specify a particular boundary type status flag.
Parameters
----------
ids : ndarray
Node IDs to check.
boundary_flag : int, optional
A boundary type to check for.
Returns
-------
ndarray
Array of booleans indicating if nodes are boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5))
>>> mg.node_is_boundary([0, 6])
array([ True, False], dtype=bool)
>>> mg.node_is_boundary([0, 6], boundary_flag=CLOSED_BOUNDARY)
array([False, False], dtype=bool)
LLCATS: NINF BC
"""
if boundary_flag is None:
return ~ (self._node_status[ids] == CORE_NODE)
else:
return self._node_status[ids] == boundary_flag
def _assign_boundary_nodes_to_grid_sides(self):
"""Assign boundary nodes to a quadrant.
For each boundary node, determines whether it belongs to the left,
right, top or bottom of the grid, based on its distance from the grid's
centerpoint (mean (x,y) position). Returns lists of nodes on each of
the four grid sides. Assumes self.status_at_node, self.number_of_nodes,
self.boundary_nodes, self._node_x, and self._node_y have been
initialized.
Returns
-------
tuple of array_like
Tuple of arrays listing the boundary nodes on each grid side, grouped as
(*east*, *north*, *west*, *south*).
Examples
--------
>>> import landlab as ll
>>> m = ll.HexModelGrid(5, 3, 1.0)
>>> [r,t,l,b] = m._assign_boundary_nodes_to_grid_sides()
>>> l
array([ 7, 12, 3])
>>> r
array([11, 15, 6])
>>> t
array([16, 18, 17])
>>> b
array([0, 2, 1])
"""
# Calculate x and y distance from centerpoint
diff_x = self.node_x[self.boundary_nodes] - numpy.mean(self.node_x)
diff_y = self.node_y[self.boundary_nodes] - numpy.mean(self.node_y)
return _sort_points_into_quadrants(diff_x, diff_y, self.boundary_nodes)
@deprecated(use='status_at_node', version=1.0)
def set_closed_nodes(self, nodes):
"""Make nodes closed boundaries.
Sets the given nodes' boundary condition statuses to CLOSED_BOUNDARY
(==4), and resets the list of active links to reflect any changes.
LLCATS: DEPR NINF BC
"""
self._node_status[nodes] = CLOSED_BOUNDARY
self._update_links_nodes_cells_to_new_BCs()
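# Minimal sketch of the deprecated helper above (status codes follow the
# usual convention: 0 core, 1 fixed value, 4 closed):
#
#     from landlab import RasterModelGrid
#     mg = RasterModelGrid((3, 4))
#     mg.set_closed_nodes([1, 2])
#     mg.status_at_node   # nodes 1 and 2 now report 4 (CLOSED_BOUNDARY)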
@deprecated(use='calc_distances_of_nodes_to_point', version=1.0)
def get_distances_of_nodes_to_point(self, coord, get_az=None,
node_subset=None,
out_distance=None, out_azimuth=None):
"""
LLCATS: DEPR NINF MEAS
"""
return self.calc_distances_of_nodes_to_point(
coord, get_az=get_az, node_subset=node_subset,
out_distance=out_distance, out_azimuth=out_azimuth)
def calc_distances_of_nodes_to_point(self, coord, get_az=None,
node_subset=None,
out_distance=None, out_azimuth=None):
"""Get distances for nodes to a given point.
Returns an array of distances for each node to a provided point.
If "get_az" is set to 'angles', returns both the distance array and an
array of azimuths from up/north. If it is set to 'displacements', it
returns the azimuths as a 2xnnodes array of x and y displacements.
If it is not set, returns just the distance array.
If "node_subset" is set as an ID, or list/array/etc of IDs method
returns just the distance (and optionally azimuth) for that node.
Point is provided as a tuple (x,y).
If out_distance (& out_azimuth) are provided, these arrays are used to
store the outputs. This is recommended for memory management reasons if
you are working with node subsets.
.. note::
Angles are returned in radians but measured clockwise from
north.
Parameters
----------
coord : tuple of float
Coordinates of the point as (x, y).
get_az: {None, 'angles', 'displacements'}, optional
Optionally calculate azimuths as either angles or displacements.
The calculated values will be returned along with the distances
as the second item of a tuple.
node_subset : array_like, optional
Calculate distances on a subset of grid nodes. The default is to
calculate distances from the provided points to all nodes.
out_distance : array_like, optional
If provided, put the calculated distances here. Otherwise,
create a new array.
out_azimuth : array_like, optional
If provided, put the calculated azimuths here. Otherwise,
create a new array.
Returns
-------
ndarray or tuple of ndarray
If *get_az* is ``None`` return the array of distances. Otherwise,
return a tuple of distances and azimuths.
Notes
-----
Once you start working with node subsets in Landlab, which can change
size between loops, it's quite possible for Python's internal memory
management to crap out after large numbers of loops (~>10k). This is
to do with the way it block allocates memory for arrays of differing
lengths, then cannot free this memory effectively.
The solution - as implemented here - is to pre-allocate all arrays as
nnodes long, then only work with the first [len_subset] entries by
slicing (in a pseudo-C-style). Care has to be taken not to
"accidentally" allow Python to allocate a new array you don't have
control over.
Then, to maintain efficient memory allocation, we create some "dummy"
nnode-long arrays to store intermediate parts of the solution in.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
Calculate distances from point at (2., 1.) to a subset of nodes on
the grid.
>>> grid.calc_distances_of_nodes_to_point((2, 1),
... node_subset=(2, 6, 7, 8, 12))
array([ 1., 1., 0., 1., 1.])
Calculate distances from a point to all nodes on the grid.
>>> dist = grid.calc_distances_of_nodes_to_point((2, 1))
>>> dist.shape == (grid.number_of_nodes, )
True
>>> dist.take((2, 6, 7, 8, 12))
array([ 1., 1., 0., 1., 1.])
Put the distances into a buffer.
>>> out = np.empty(grid.number_of_nodes, dtype=float)
>>> dist = grid.calc_distances_of_nodes_to_point((2, 1),
... out_distance=out)
>>> out is dist
True
>>> out.take((2, 6, 7, 8, 12))
array([ 1., 1., 0., 1., 1.])
Calculate azimuths along with distances. The azimuths are calculated
in radians but measured clockwise from north.
>>> (_, azim) = grid.calc_distances_of_nodes_to_point((2, 1),
... get_az='angles')
>>> azim.take((2, 6, 7, 8, 12)) * 180. / np.pi
array([ 180., 270., 0., 90., 0.])
>>> (_, azim) = grid.calc_distances_of_nodes_to_point((2, 1),
... get_az='angles', node_subset=(1, 3, 11, 13))
>>> azim * 180. / np.pi
array([ 225., 135., 315., 45.])
When calculating displacements, the first row contains displacements
in x and the second displacements in y.
>>> (_, azim) = grid.calc_distances_of_nodes_to_point((2, 1),
... get_az='displacements', node_subset=(2, 6, 7, 8, 12))
>>> azim
array([[ 0., -1., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
LLCATS: NINF MEAS
"""
if len(coord) != 2:
raise ValueError('coordinate must be iterable of length 2')
if get_az not in (None, 'displacements', 'angles'):
raise ValueError('get_az not understood')
if node_subset is not None and numpy.any(numpy.isnan(node_subset)):
node_subset = None
if node_subset is not None:
if not isinstance(node_subset, numpy.ndarray):
node_subset = numpy.array(node_subset)
node_subset = node_subset.reshape((-1, ))
len_subset = node_subset.size
else:
len_subset = self.number_of_nodes
if out_distance is None:
out_distance = numpy.empty(len_subset, dtype=numpy.float)
if out_distance.size != len_subset:
raise ValueError('output array size mismatch for distances')
if get_az is not None:
if get_az == 'displacements':
az_shape = (2, len_subset)
else:
az_shape = (len_subset, )
if out_azimuth is None:
out_azimuth = numpy.empty(az_shape, dtype=numpy.float)
if out_azimuth.shape != az_shape:
raise ValueError('output array mismatch for azimuths')
azimuths_as_displacements = numpy.empty((2, self.number_of_nodes))
dummy_nodes_1 = numpy.empty(self.number_of_nodes)
dummy_nodes_2 = numpy.empty(self.number_of_nodes)
dummy_nodes_3 = numpy.empty(self.number_of_nodes)
if node_subset is None:
azimuths_as_displacements[0] = (self.node_x - coord[0])
azimuths_as_displacements[1] = (self.node_y - coord[1])
else:
azimuths_as_displacements[0, :len_subset] = (
self.node_x[node_subset] - coord[0])
azimuths_as_displacements[1, :len_subset] = (
self.node_y[node_subset] - coord[1])
numpy.square(azimuths_as_displacements[0, :len_subset],
out=dummy_nodes_1[:len_subset])
numpy.square(azimuths_as_displacements[1, :len_subset],
out=dummy_nodes_2[:len_subset])
numpy.add(dummy_nodes_1[:len_subset], dummy_nodes_2[:len_subset],
out=dummy_nodes_3[:len_subset])
numpy.sqrt(dummy_nodes_3[:len_subset], out=out_distance)
if get_az:
if get_az == 'displacements':
out_azimuth[:] = azimuths_as_displacements[:, :len_subset]
elif get_az == 'angles':
numpy.arctan2(
azimuths_as_displacements[0, :len_subset],
azimuths_as_displacements[1, :len_subset],
out=out_azimuth[:len_subset])
less_than_zero = numpy.empty(self.number_of_nodes, dtype=bool)
numpy.less(out_azimuth, 0., out=less_than_zero[:len_subset])
out_azimuth[less_than_zero[:len_subset]] += 2. * numpy.pi
return out_distance, out_azimuth
else:
return out_distance
@property
def all_node_distances_map(self):
"""Get distances from every node to every other node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> distances = grid.all_node_distances_map
The shape of the array is ``number_of_nodes`` by ``number_of_nodes``
and distance from a node to itself is zero.
>>> distances.shape == (grid.number_of_nodes, grid.number_of_nodes)
True
>>> distances.diagonal()
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])

        The distances from the first node to all nodes in its row and all the
        nodes in its column.
>>> distances[0, :4]
array([ 0., 1., 2., 3.])
>>> distances[0, ::4]
array([ 0., 1., 2.])

        LLCATS: NINF MEAS
"""
if self._all_node_distances_map is None:
self._create_all_node_distances_azimuths_maps()
return self._all_node_distances_map
@property
def all_node_azimuths_map(self):
"""Get azimuths from every node to every other node.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> angles = grid.all_node_azimuths_map

        The shape of the array is ``number_of_nodes`` by ``number_of_nodes``
        and azimuth from a node to itself is zero.
>>> angles.shape == (grid.number_of_nodes, grid.number_of_nodes)
True
>>> angles.diagonal()
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])

        Angles are measured in radians and increase clockwise starting at
        north.
>>> angles *= 180. / np.pi
>>> angles[0, :4]
array([ 0., 90., 90., 90.])
>>> angles[0, ::4]
array([ 0., 0., 0.])
>>> angles[0, ::5]
array([ 0., 45., 45.])

        LLCATS: NINF MEAS
"""
if self._all_node_azimuths_map is None:
self._create_all_node_distances_azimuths_maps()
return self._all_node_azimuths_map
def _create_all_node_distances_azimuths_maps(self):
"""Build distance-azimuth maps.
This function creates and stores in the grid field two ``nnodes`` by
``nnodes`` arrays that map the distances and azimuths of all nodes
in the grid to all nodes in the grid.
        This is useful if your module needs to make repeated lookups of
        distances between the same nodes, but the maps can use a large
        amount of memory, so they should be used with caution.
        The distance map is symmetrical, so it does not matter whether rows
        are "from" or "to".
The arrays are called:
- ``self.all_node_distances_map``
- ``self.all_node_azimuths_map``
Returns
-------
tuple of ndarrays
Tuple of (distances, azimuths)
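
        A minimal doctest-style sketch (assuming a small ``RasterModelGrid``;
        the same maps are exposed through the ``all_node_distances_map`` and
        ``all_node_azimuths_map`` properties):

        >>> import numpy as np
        >>> from landlab import RasterModelGrid
        >>> grid = RasterModelGrid((3, 3))
        >>> dists, azims = grid._create_all_node_distances_azimuths_maps()
        >>> dists.shape == (grid.number_of_nodes, grid.number_of_nodes)
        True
        >>> np.allclose(dists, dists.T)  # the distance map is symmetric
        True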
"""
self._all_node_distances_map = numpy.empty((self.number_of_nodes,
self.number_of_nodes))
self._all_node_azimuths_map = numpy.empty((self.number_of_nodes,
self.number_of_nodes))
node_coords = numpy.empty((self.number_of_nodes, 2))
node_coords[:, 0] = self.node_x
node_coords[:, 1] = self.node_y
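        # Row i of each map holds the values from node i to every node; the
        # distance map is symmetric, so rows and columns are interchangeable.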
for i in range(self.number_of_nodes):
(self._all_node_distances_map[i, :],
self._all_node_azimuths_map[i, :]) = (
self.calc_distances_of_nodes_to_point(
(node_coords[i, 0], node_coords[i, 1]), get_az='angles'))
assert numpy.all(self._all_node_distances_map >= 0.)
return self._all_node_distances_map, self._all_node_azimuths_map
def _sort_links_by_midpoint(self):
"""Sort links in order first by midpoint x coordinate, then y.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg._sort_links_by_midpoint()
"""
pts = np.zeros((self.number_of_links, 2))
pts[:, 0] = (self.node_x[self.node_at_link_tail] +
self.node_x[self.node_at_link_head]) / 2
pts[:, 1] = (self.node_y[self.node_at_link_tail] +
self.node_y[self.node_at_link_head]) / 2
indices = argsort_points_by_x_then_y(pts)
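        # Apply the same permutation to the tail and head arrays so that each
        # link keeps its original pair of end nodes.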
self.node_at_link_tail[:] = self.node_at_link_tail[indices]
self.node_at_link_head[:] = self.node_at_link_head[indices]
def move_origin(self, origin):
"""Changes the x, y values of all nodes. Initially a grid will have
an origin of 0,0, and all x,y values will be relative to 0,0. This
will add origin[0] to all x values and origin[1] to all y values.
Note this is most likely useful when importing a DEM that has an
absolute location, however it can be used generally.
Parameters
----------
        origin : list of two floats (may be negative)
            [x, y], where x is the value added to all x coordinates and
            y is the value added to all y coordinates.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((4, 3), 1.0) # rows, columns, spacing
>>> rmg.node_x
array([ 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.])
>>> rmg.node_y
array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> rmg.move_origin((5,1.5))
>>> rmg.node_x
array([ 5., 6., 7., 5., 6., 7., 5., 6., 7., 5., 6., 7.])
>>> rmg.node_y
array([ 1.5, 1.5, 1.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 4.5, 4.5,
4.5])

        LLCATS: GINF MEAS
"""
self._node_x += origin[0]
self._node_y += origin[1]
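
# Attach the mapper, gradient, and divergence helper functions (map_*, calc_*)
# defined in their respective modules as members of ModelGrid.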
add_module_functions_to_class(ModelGrid, 'mappers.py', pattern='map_*')
# add_module_functions_to_class(ModelGrid, 'gradients.py',
# pattern='calculate_*')
add_module_functions_to_class(ModelGrid, 'gradients.py', pattern='calc_*')
add_module_functions_to_class(ModelGrid, 'divergence.py', pattern='calc_*')
if __name__ == '__main__':
import doctest
doctest.testmod()