| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| stringlengths 6-112 | stringlengths 4-204 | stringlengths 1-3 | stringlengths 4-6 | stringlengths 714-810k | stringclasses 15 values |
hsuantien/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
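# A minimal worked example (illustrative numbers only) of what _intercept_dot
# returns when the intercept is folded into w as its last entry:
#
#   import numpy as np
#   X = np.array([[1., 2.], [3., 4.]])
#   y = np.array([1., -1.])
#   w = np.array([0.5, -0.25, 0.1])        # last entry is the intercept c
#   w_, c, yz = _intercept_dot(w, X, y)
#   # c  == 0.1
#   # yz == y * (np.dot(X, w_) + c) == array([ 0.1, -0.6])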
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
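# A quick sanity check of the analytic gradient above against finite differences,
# sketched on a tiny random problem (illustrative only; uses scipy.optimize.check_grad):
#
#   import numpy as np
#   from scipy import optimize
#   rng = np.random.RandomState(0)
#   X, y, w = rng.randn(20, 3), np.sign(rng.randn(20)), np.zeros(3)
#   err = optimize.check_grad(
#       _logistic_loss, lambda w, *args: _logistic_loss_and_grad(w, *args)[1],
#       w, X, y, 1.0)
#   # err should be close to zero when the gradient is consistent with the loss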
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
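# For intuition: without an intercept, the operator applied by Hs is the dense
# Hessian X.T * diag(d) * X + alpha * I with d = sample_weight * z * (1 - z).
# A sketch of checking Hs against that matrix on a tiny problem (illustrative only):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X, y, w = rng.randn(15, 4), np.sign(rng.randn(15)), rng.randn(4)
#   grad, Hs = _logistic_grad_hess(w, X, y, alpha=1.0)
#   z = expit(y * np.dot(X, w))            # expit is imported at module level
#   d = z * (1 - z)                        # unit sample weights
#   H = np.dot(X.T * d, X) + np.eye(4)     # dense Hessian with alpha = 1
#   s = rng.randn(4)
#   # np.allclose(Hs(s), np.dot(H, s)) is expected to hold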
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
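# In matrix form the loss computed above is
#     -sum_i sample_weight[i] * sum_k Y[i, k] * log p[i, k]  +  0.5 * alpha * ||w||^2
# with p = softmax(X w.T + intercept), row-wise. A tiny illustrative call (made-up values):
#
#   import numpy as np
#   X = np.array([[1., 0.], [0., 1.]])
#   Y = np.array([[1., 0.], [0., 1.]])     # one-hot labels, 2 classes
#   w = np.zeros(2 * 2)                    # no intercept: n_classes * n_features
#   loss, p, W = _multinomial_loss(w, X, Y, 0., np.ones(2))
#   # with all-zero coefficients p is uniform (0.5 everywhere) and loss == 2 * log(2)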
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
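# hessp follows the R-operator trick from the Pearlmutter reference above: it returns
# the Hessian-vector product H @ v without ever forming the
# (n_classes * n_features) x (n_classes * n_features) Hessian. A rough finite-difference
# check, sketched on a small random problem (illustrative only):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(30, 3)
#   Y = np.eye(3)[rng.randint(3, size=30)]     # one-hot labels, 3 classes
#   sw = np.ones(30)
#   w, v = rng.randn(3 * 3), rng.randn(3 * 3)
#   grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sw)
#   eps = 1e-6
#   grad_eps = _multinomial_loss_grad(w + eps * v, X, Y, 1.0, sw)[1]
#   # (grad_eps - grad) / eps should approximate hessp(v) up to finite-difference error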
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. when 1 and -1 are in the form
        # of strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
        # it must work both when the bias term is given and when it is not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
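# A sketch of calling logistic_regression_path directly on a small binary problem
# (the data below is random and purely illustrative):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(50, 3)
#   y = (rng.randn(50) > 0).astype(np.float64)
#   coefs, Cs = logistic_regression_path(X, y, Cs=5, fit_intercept=True,
#                                        solver='lbfgs')
#   # len(coefs) == 5: one coefficient vector of shape (n_features + 1,) per value in Cs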
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen on a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
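# A minimal usage sketch for the estimator defined above (illustrative data only):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 4)
#   y = (X[:, 0] + 0.5 * rng.randn(100) > 0).astype(int)
#   clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
#   # clf.coef_.shape == (1, 4) for a binary problem
#   # clf.predict_proba(X[:3]).shape == (3, 2), columns ordered as clf.classes_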
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are guessed to be the coefficients obtained after convergence
    in the previous fit, so it is supposed to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        on a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
        final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since the multinomial loss is shared by all classes.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
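# And a matching usage sketch for the cross-validated estimator (again illustrative):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(120, 4)
#   y = (X[:, 0] + 0.5 * rng.randn(120) > 0).astype(int)
#   clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
#   # clf.Cs_.shape == (5,): the grid of C values that was searched
#   # clf.scores_[1].shape == (3, 5): one score per fold and per C for class 1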
| bsd-3-clause |

Akshay0724/scikit-learn | examples/datasets/plot_iris_dataset.py | 36 | 1929 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The plot below uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |

RockRaidersInc/ROS-Main | vision/neural_nets/p1_train_svm.py | 1 | 6990 |
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.exceptions import ConvergenceWarning
from functools import reduce
import warnings
from confusion_mat_tools import save_confusion_matrix
def read_dataset(filename):
    # The dataset features are stored in a .npz file; np.load gives us a dictionary-like object with all the features
train_set_raw = np.load(filename)
features = []
labels = []
file_names = []
for key in train_set_raw:
features.append(train_set_raw[key])
labels.append(key.split("/")[2])
file_names.append(key)
return labels, features, file_names
def get_single_var_confusion(train_labels, train_features, test_labels, test_features, target_class, c_vals, test_file_names=None):
"""
Trains a SVM for each passed value of C (in c_vals), finds the best one, and returns a single row of the
confusion matrix (for target_class).
"""
# first find the best value of C. Do this by splitting the training data up into a training set and validation set
print("now finding the vest value of C for class " + target_class)
def train_and_test(C):
# divide the train set up into a new training and validation set (for testing different values of C)
n_train_features = int(len(train_features) * 0.8)
new_train_features = train_features[:n_train_features]
new_train_labels = train_labels[:n_train_features]
new_validation_features = train_features[n_train_features:]
new_validation_labels = train_labels[n_train_features:]
# now train a SVM with the new train and validation sets
clf = train_svm(target_class, new_train_features, new_train_labels, C)
correct_ratio, predictions = test_svm(clf, target_class, new_validation_features, new_validation_labels)
print("\twith C=%1.1f: labeled %3.5f%% of validation set accurately" % (C, correct_ratio * 100))
return correct_ratio, C
    # train and evaluate an SVM for every value of C in c_vals
all_svm_results = list(map(train_and_test, c_vals))
_, best_c = max(all_svm_results, key=lambda x: x[0])
# now train a new SVM on all the training data with the best value of C
clf = train_svm(target_class, train_features, train_labels, best_c)
best_accuracy, best_predictions = test_svm(clf, target_class, test_features, test_labels)
# print out the best results:
print("best SVM: got %3.1f%% of predictions correct on test data with C=%1.1f" % (best_accuracy * 100, best_c))
# optionally print out the file names of a false positive and a false negative
if test_file_names is not None:
bool_test_labels = np.array([1 if i == target_class else -1 for i in test_labels])
false_positives = (best_predictions != bool_test_labels) * (bool_test_labels == -1)
false_negatives = (best_predictions != bool_test_labels) * (bool_test_labels == 1)
print("false positive:", test_file_names[np.nonzero(false_positives)[0][0]])
print("false negative:", test_file_names[np.nonzero(false_negatives)[0][0]])
print() # a newline to separate classes
# put together a row of the confusion matrix
confusion_dict = {}
for i in range(len(test_labels)):
if best_predictions[i] == 1: # if the SVM predicted the right value
if test_labels[i] in confusion_dict:
confusion_dict[test_labels[i]] += 1
else:
confusion_dict[test_labels[i]] = 1
return confusion_dict
def test_svm(clf, target_class, test_features, test_labels):
"""
Evaluates the passed SVM on the passed data. The proportion of test vectors it correctly labeled and
a matrix of predicted labels are returned.
"""
test_predictions = clf.predict(test_features)
    # convert the string labels to +1/-1; correct_predictions will be True where the
    # SVM's predictions agree with these converted labels and False everywhere else
test_labels = np.array([1 if i == target_class else -1 for i in test_labels])
correct_predictions = test_predictions == test_labels
correct_ratio = correct_predictions.mean()
return correct_ratio, test_predictions
def train_svm(target_class, train_features, train_labels, c):
"""
Trains and returns an SVM on the passed data and passed value of c.
"""
processed_train_labels = np.array([1 if i == target_class else -1 for i in train_labels])
    clf = LinearSVC(C=c, random_state=1)  # random_state=1 is important, this keeps results from varying across runs
clf.fit(train_features, processed_train_labels)
return clf
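# For intuition, train_svm reduces the problem to one-vs-rest: labels equal to
# target_class become +1 and everything else -1 before LinearSVC is fit.
# A tiny illustrative call (made-up 2-D features, class names from this script):
#
#   import numpy as np
#   feats = np.array([[0., 1.], [1., 0.], [1., 1.]])
#   labels = ["grass", "redcarpet", "grass"]
#   clf = train_svm("grass", feats, labels, c=1.0)
#   clf.predict(feats)     # array of +1/-1 predictions for the "grass"-vs-rest problem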
def main(c_vals=(1,)):
global test_file_names
# load the datasets
train_labels, train_features, _ = read_dataset("image_descriptors_train.npz")
test_labels, test_features, test_file_names = read_dataset("image_descriptors_test.npz")
    # make a python set with all the class names ("redcarpet", "grass", etc.)
all_class_labels = list(reduce(lambda a, b: a | {b}, test_labels, set()))
confusion_dicts = {}
for label in all_class_labels:
confusion_dicts[label] = get_single_var_confusion(train_labels,
train_features,
test_labels,
test_features,
label,
c_vals,
test_file_names=test_file_names)
# now print the confusion matrix (and turn it into a numpy matrix while we're at it)
confusion_matrix_list = []
    # first print the x axis labels (what the SVM predicted)
print("Confusion Matrix:")
print(" " * 11, end="")
for y in all_class_labels:
print(" " * (11 - len(y)) + y, end="")
print()
    # now print the y labels and values (the actual class labels)
for y in all_class_labels:
predictions = confusion_dicts[y]
print(" " * (11 - len(y)) + y, end="")
for x in all_class_labels:
print("%11i" % (predictions[x],), end="")
confusion_matrix_list.append(predictions[x])
print()
# now turn the confusion matrix into a numpy matrix
confusion_mat = np.array(confusion_matrix_list).reshape(len(all_class_labels), len(all_class_labels))
save_confusion_matrix(confusion_mat, all_class_labels, "SVM_confusion.JPEG")
if __name__ == "__main__":
# turn off convergence warnings (they make it really hard to see actual output)
warnings.filterwarnings("ignore", category=ConvergenceWarning)
    # this will create an array of 11 values from 0.1 to 10 (with approximately as many
    # points in the range [0.1, 1] as in [1, 10]). These are the values of C that will be
    # tried when tuning the SVMs
c_vals = [10 ** float(x) for x in np.linspace(-1, 1, 11)]
main(c_vals=c_vals)
| gpl-3.0 |

tianqichongzhen/ProgramPrac | ML/SVM/svm-smo_simple.py | 1 | 9415 |
#!/usr/bin/python
# _*_ coding:utf8 _*_
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from numpy import *
import matplotlib.pyplot as plt
def loadDataSet(fileName):
"""
    Parse the file line by line to obtain each row's class label and the full feature matrix
    Args:
        fileName  name of the data file
    Returns:
        dataMat   feature matrix
        labelMat  class labels
"""
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split('\t')
dataMat.append([float(lineArr[0]), float(lineArr[1])])
labelMat.append(float(lineArr[2]))
return dataMat, labelMat
def selectJrand(i, m):
"""
    Randomly select an integer index
    Args:
        i  index of the first alpha
        m  total number of alphas
    Returns:
        j  a random integer in [0, m) that is not equal to i
"""
j = i
while j == i:
j = int(random.uniform(0, m))
return j
def clipAlpha(aj, H, L):
"""clipAlpha(调整aj的值,使aj处于 L<=aj<=H)
Args:
aj 目标值
H 最大值
L 最小值
Returns:
aj 目标值
"""
if aj > H:
aj = H
if L > aj:
aj = L
return aj
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
"""smoSimple
Args:
        dataMatIn    data set (feature matrix)
        classLabels  class labels
        C   slack penalty (a constant) that allows some points to lie on the wrong
            side of the separating hyperplane; it trades off maximizing the margin
            against keeping most functional margins below 1.0, and can be tuned
            to obtain different results.
        toler   numerical tolerance used when checking the KKT conditions
        maxIter maximum number of passes over the data before stopping
    Returns:
        b       bias (constant) term of the model
        alphas  Lagrange multipliers
"""
dataMatrix = mat(dataMatIn)
    # transpose the class labels into a column vector (same effect as .T)
labelMat = mat(classLabels).transpose()
m, n = shape(dataMatrix)
    # initialize b and the alphas (the alphas act somewhat like per-sample weights)
b = 0
alphas = mat(zeros((m, 1)))
    # number of consecutive passes over the data in which no alpha has changed
iter = 0
while (iter < maxIter):
# w = calcWs(alphas, dataMatIn, classLabels)
# print("w:", w)
        # track whether any alpha was optimized; reset to 0 each pass, then sweep the whole set in order
alphaPairsChanged = 0
for i in range(m):
# print 'alphas=', alphas
# print 'labelMat=', labelMat
# print 'multiply(alphas, labelMat)=', multiply(alphas, labelMat)
            # the predicted class is y = w^T * x[i] + b, where w = sum_n alphas[n] * labelMat[n] * x[n];
            # expanding w this way is why the whole data matrix appears in the product below
fXi = float(multiply(alphas, labelMat).T*(dataMatrix*dataMatrix[i, :].T)) + b
            # compare the prediction with the true label and compute the error Ei
Ei = fXi - float(labelMat[i])
            # Constraints (the KKT conditions are the optimality conditions used here;
            # the optimization problem is to find the global minimum of the objective
            # over the feasible region).
            # 0 <= alphas[i] <= C; alphas sitting exactly at 0 or C are on the box
            # boundary and cannot be optimized directly, since one alpha must be
            # increased while another is decreased.
            # labelMat[i]*Ei measures the violation: optimization is needed only when
            # it exceeds toler (the sign does not matter, only the magnitude).
            '''
            # check whether the training sample (xi, yi) satisfies the KKT conditions
            yi*f(i) >= 1 and alpha = 0 (outside the boundary)
            yi*f(i) == 1 and 0 < alpha < C (on the boundary)
            yi*f(i) <= 1 and alpha = C (between the boundaries)
            '''
if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
                # sample i violates the KKT conditions: randomly pick a second sample j != i
j = selectJrand(i, m)
                # compute the prediction and error for sample j
fXj = float(multiply(alphas, labelMat).T*(dataMatrix*dataMatrix[j, :].T)) + b
Ej = fXj - float(labelMat[j])
alphaIold = alphas[i].copy()
alphaJold = alphas[j].copy()
                # compute the endpoints L and H of the feasible segment for alphas[j], given the
                # box constraint 0 <= alphas <= C and the linear equality constraint on the pair
if (labelMat[i] != labelMat[j]):
L = max(0, alphas[j] - alphas[i])
H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
H = min(C, alphas[j] + alphas[i])
                # if the lower and upper clipping bounds coincide there is no room to optimize
if L == H:
print("L==H")
continue
                # In standard SMO, eta = K_ii + K_jj - 2*K_ij is the second derivative of the
                # objective along the constraint direction. Here its negative is computed,
                # eta = 2*K_ij - K_ii - K_jj, so a non-negative value means no usable curvature.
eta = 2.0 * dataMatrix[i, :]*dataMatrix[j, :].T - dataMatrix[i, :]*dataMatrix[i, :].T - dataMatrix[j, :]*dataMatrix[j, :].T
if eta >= 0:
print("eta>=0")
continue
                # compute the new (unclipped) value of alphas[j]
alphas[j] -= labelMat[j]*(Ei - Ej)/eta
                # clip the updated value so it stays within [L, H]
alphas[j] = clipAlpha(alphas[j], H, L)
                # if alphas[j] barely changed, skip this pair and continue with the next i
if (abs(alphas[j] - alphaJold) < 0.00001):
print("j not moving enough")
continue
                # update alphas[i] by the same amount in the opposite direction (scaled by the labels)
alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])
                # recompute the threshold b from the updated alphas
b1 = b - Ei- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[i, :].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i, :]*dataMatrix[j, :].T
b2 = b - Ej- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i, :]*dataMatrix[j, :].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j, :]*dataMatrix[j, :].T
if (0 < alphas[i]) and (C > alphas[i]):
b = b1
elif (0 < alphas[j]) and (C > alphas[j]):
b = b2
else:
b = (b1 + b2)/2.0
alphaPairsChanged += 1
print("iter: %d i:%d, pairs changed %d" % (iter, i, alphaPairsChanged))
        # if no alphas changed in this pass, count it toward maxIter; otherwise reset the counter
if (alphaPairsChanged == 0):
iter += 1
else:
iter = 0
print("iteration number: %d" % iter)
return b, alphas
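# Editor's sketch (illustrative, not part of the original file): a worked example of the
# [L, H] bounds computed inside smoSimple. With C = 0.6, labelMat[i] != labelMat[j],
# alphas[i] = 0.2 and alphas[j] = 0.5, the equality constraint fixes alphas[j] - alphas[i],
# giving L = max(0, 0.5 - 0.2) = 0.3 and H = min(0.6, 0.6 + 0.5 - 0.2) = 0.6,
# so the updated alphas[j] is clipped into [0.3, 0.6] by clipAlpha.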
def calcWs(alphas, dataArr, classLabels):
"""
    Compute the weight vector w from the alphas
    Args:
        alphas       Lagrange multipliers
        dataArr      feature data set
        classLabels  target labels
    Returns:
        w  weight vector (regression coefficients)
"""
X = mat(dataArr)
labelMat = mat(classLabels).transpose()
m, n = shape(X)
w = zeros((n, 1))
for i in range(m):
w += multiply(alphas[i] * labelMat[i], X[i, :].T)
return w
def plotfig_SVM(xMat, yMat, ws, b, alphas):
"""
    References:
http://blog.csdn.net/maoersong/article/details/24315633
http://www.cnblogs.com/JustForCS/p/5283489.html
http://blog.csdn.net/kkxgx/article/details/6951959
"""
xMat = mat(xMat)
yMat = mat(yMat)
    # b is originally a matrix; converting it to an array gives shape (1, 1), so [0] reduces it to shape (1,)
b = array(b)[0]
fig = plt.figure()
ax = fig.add_subplot(111)
    # note the use of flatten to turn the matrix columns into 1-D arrays
ax.scatter(xMat[:, 0].flatten().A[0], xMat[:, 1].flatten().A[0])
    # the x range is chosen from the span of the original data set dataArr[:, 0]
x = arange(-1.0, 10.0, 0.1)
    # from w.x + b = 0, i.e. w0*x1 + w1*x2 + b = 0, solve for x2 (plotted as y)
y = (-b-ws[0, 0]*x)/ws[1, 0]
ax.plot(x, y)
for i in range(shape(yMat[0, :])[1]):
if yMat[0, i] > 0:
ax.plot(xMat[i, 0], xMat[i, 1], 'cx')
else:
ax.plot(xMat[i, 0], xMat[i, 1], 'kp')
    # mark the support vectors (alpha > 0) in red
for i in range(100):
if alphas[i] > 0.0:
ax.plot(xMat[i, 0], xMat[i, 1], 'ro')
plt.show()
if __name__ == "__main__":
    # load the features and target labels
dataArr, labelArr = loadDataSet('/root/桌面/MachineLearning/input/6.SVM/testSet.txt')
# print labelArr
    # b is the bias term, alphas are the Lagrange multipliers
b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
    print '\n\n\n'
print 'b=', b
print 'alphas[alphas>0]=', alphas[alphas > 0]
print 'shape(alphas[alphas > 0])=', shape(alphas[alphas > 0])
for i in range(100):
if alphas[i] > 0:
print dataArr[i], labelArr[i]
    # plot the result
ws = calcWs(alphas, dataArr, labelArr)
plotfig_SVM(dataArr, labelArr, ws, b, alphas)
|
apache-2.0
|
spatialaudio/non-smooth-secondary-source-distributions
|
figure2b.py
|
1
|
2312
|
""" Generates Figure 2b of the the paper
Sascha Spors, Frank Schultz, and Hagen Wierstorf. Non-smooth secondary
source distributions in wave
field synthesis. In German Annual Conference
on Acoustics (DAGA), March 2015.
Level synthesized by a semi-infinite rectangular array driven by
two-dimensional WFS for a virtual line source.
"""
import numpy as np
import matplotlib.pyplot as plt
import sfs
# simulation parameters
xref = [0, 0, 0] # reference point
N = 1000
Nr = [0, 1, 10]
dx = 0.10
normalization = 0.0577 # normalization used for plotting
f = 500
omega = 2 * np.pi * f # angular frequency
src_angles = np.linspace(180, 90, num=180) # virtual source angles ps
grid = sfs.util.xyz_grid(xref[0], xref[1], 0, spacing=1) # evaluated position
def compute_sound_field(x0, n0, a0, omega, angle):
npw = sfs.util.direction_vector(np.radians(angle), np.radians(90))
xs = xref + (np.sqrt(xref[0]**2 + xref[1]**2) + 4) * np.asarray(npw)
d = sfs.mono.drivingfunction.wfs_2d_line(omega, x0, n0, xs)
a = sfs.mono.drivingfunction.source_selection_point(n0, x0, xs)
twin = sfs.tapering.none(a)
p = sfs.mono.synthesized.generic(omega, x0, n0, d * twin * a0, grid,
source=sfs.mono.source.line)
return p, twin, xs
# compute field at the given position for given virtual source positions
p = []
trajectory = []
lsactive = []
for Nr0 in Nr:
# get secondary source positions
x0, n0, a0 = sfs.array.rounded_edge(N, Nr0, dx, n0=[0, -1, 0],
center=[-2, 2, 0])
ptmp = []
xstmp = []
twintmp = []
for angle in src_angles:
tmp, twin, xs = compute_sound_field(x0, n0, a0, omega, angle)
ptmp.append(tmp)
xstmp.append(xs)
twintmp.append(twin)
p.append(ptmp)
trajectory.append(xstmp)
lsactive.append(twintmp)
p = np.asarray(p)
trajectory = np.asarray(trajectory)
lsactive = np.asarray(lsactive)
fig = plt.figure()
ax = plt.gca()
im = plt.plot(src_angles, 20*np.log10(np.abs(p.T/normalization)))
plt.axis([90, 180, -1, 3])
ax.invert_xaxis()
plt.xlabel('angle (deg)')
plt.ylabel('relative level (dB)')
plt.grid()
ax.legend(['rect', '$N_r = 1$', '$N_r = 10$'], loc='lower center', ncol=3)
myfig = plt.gcf()
plt.show()
|
mit
|
herilalaina/scikit-learn
|
examples/text/document_clustering.py
|
21
|
8531
|
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are only based
on cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', alternate_sign=False,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
alternate_sign=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
|
bsd-3-clause
|
aetilley/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
bsd-3-clause
|
compops/pmh-tutorial
|
python/example3-sv.py
|
1
|
5004
|
##############################################################################
# Parameter estimation using particle Metropolis-Hastings
# in a stochastic volatility model
#
# Johan Dahlin <liu (at) johandahlin.com.nospam>
# Documentation at https://github.com/compops/pmh-tutorial
# Published under GNU General Public License
##############################################################################
from __future__ import print_function, division
import matplotlib.pylab as plt
import quandl
import numpy as np
from helpers.stateEstimation import particleFilterSVmodel
from helpers.parameterEstimation import particleMetropolisHastingsSVModel
# Set the random seed to replicate results in tutorial
np.random.seed(10)
##############################################################################
# Load data
##############################################################################
data = quandl.get("NASDAQOMX/OMXS30", trim_start="2012-01-02", trim_end="2014-01-02")
logReturns = 100 * np.diff(np.log(data['Index Value']))
noLogReturns = len(logReturns)
##############################################################################
# PMH
##############################################################################
initialTheta = np.array((0.0, 0.9, 0.2))  # Initial guess of theta = (mu, phi, sigmav)
noParticles = 500 # Choose noParticles ~ noLogReturns
noBurnInIterations = 2500
noIterations = 7500
stepSize = np.diag((0.10**2, 0.01**2, 0.05**2))
logVolatilityEst, parameterTrace = particleMetropolisHastingsSVModel(
logReturns, initialTheta, noParticles,
particleFilterSVmodel, noIterations, stepSize)
##############################################################################
# Plot the results
##############################################################################
noBins = int(np.floor(np.sqrt(noIterations - noBurnInIterations)))
grid = np.arange(noBurnInIterations, noIterations, 1)
logVolatilityEst = logVolatilityEst[noBurnInIterations:noIterations, :]
parameterEst = parameterTrace[noBurnInIterations:noIterations, :]
plt.figure(1)
plt.subplot(5, 3, (1, 3))
plt.plot(logReturns, color='#1B9E77', linewidth=1.5)
plt.xlabel("time")
plt.ylabel("log-return")
plt.subplot(5, 3, (4, 6))
plt.plot(np.mean(logVolatilityEst, axis=0), color='#D95F02', linewidth=1.5)
plt.xlabel("time")
plt.ylabel("log-volatility estimate")
# Histogram of marginal parameter posterior of mu
plt.subplot(5, 3, 7)
plt.hist(parameterEst[:, 0], noBins, normed=1, facecolor='#7570B3')
plt.xlabel("mu")
plt.ylabel("posterior density estimate")
plt.axvline(np.mean(parameterEst[:, 0]), linewidth=1.5, color='k')
# Trace plot of mu
plt.subplot(5, 3, 8)
plt.plot(grid, parameterEst[:, 0], color='#7570B3')
plt.xlabel("iteration")
plt.ylabel("trace of mu")
plt.axhline(np.mean(parameterEst[:, 0]), linewidth=1.5, color='k')
# Autocorrelation function for mu
plt.subplot(5, 3, 9)
detrended_trace = parameterEst[:, 0] - np.mean(parameterEst[:, 0])
macf = np.correlate(detrended_trace, detrended_trace, mode='full')
idx = int(macf.size/2)
macf = macf[idx:]
macf = macf[0:100]
macf /= macf[0]
grid_acf = range(len(macf))
plt.plot(grid_acf, macf, color='#7570B3')
plt.xlabel("lag")
plt.ylabel("ACF of mu")
# Histogram of marginal parameter posterior of phi
plt.subplot(5, 3, 10)
plt.hist(parameterEst[:, 1], noBins, normed=1, facecolor='#E7298A')
plt.xlabel("phi")
plt.ylabel("posterior density estimate")
plt.axvline(np.mean(parameterEst[:, 1]), linewidth=1.5, color='k')
# Trace plot of phi
plt.subplot(5, 3, 11)
plt.plot(grid, parameterEst[:, 1], color='#E7298A')
plt.xlabel("iteration")
plt.ylabel("trace of phi")
plt.axhline(np.mean(parameterEst[:, 1]), linewidth=1.5, color='k')
# Autocorrelation function for phi
plt.subplot(5, 3, 12)
detrended_trace = parameterEst[:, 1] - np.mean(parameterEst[:, 1])
macf = np.correlate(detrended_trace, detrended_trace, mode='full')
idx = int(macf.size/2)
macf = macf[idx:]
macf = macf[0:100]
macf /= macf[0]
grid_acf = range(len(macf))
plt.plot(grid_acf, macf, color='#E7298A')
plt.xlabel("lag")
plt.ylabel("ACF of phi")
# Histogram of marginal parameter posterior of sigma
plt.subplot(5, 3, 13)
plt.hist(parameterEst[:, 2], noBins, normed=1, facecolor='#66A61E')
plt.xlabel("sigmav")
plt.ylabel("posterior density estimate")
plt.axvline(np.mean(parameterEst[:, 2]), linewidth=1.5, color='k')
# Trace plot of sigma
plt.subplot(5, 3, 14)
plt.plot(grid, parameterEst[:, 2], color='#66A61E')
plt.xlabel("iteration")
plt.ylabel("trace of sigmav")
plt.axhline(np.mean(parameterEst[:, 2]), linewidth=1.5, color='k')
# Autocorrelation function for sigma
plt.subplot(5, 3, 15)
detrended_trace = parameterEst[:, 2] - np.mean(parameterEst[:, 2])
macf = np.correlate(detrended_trace, detrended_trace, mode='full')
idx = int(macf.size/2)
macf = macf[idx:]
macf = macf[0:100]
macf /= macf[0]
grid_acf = range(len(macf))
plt.plot(grid_acf, macf, color='#66A61E')
plt.xlabel("lag")
plt.ylabel("ACF of sigmav")
plt.show()
|
gpl-2.0
|
lnls-fac/apsuite
|
apsuite/commisslib/measure_bbb_data.py
|
1
|
26613
|
"""Main module."""
import time as _time
import numpy as _np
import scipy.signal as _scysig
import scipy.integrate as _scyint
import scipy.optimize as _scyopt
import matplotlib.pyplot as _mplt
import matplotlib.gridspec as _mgs
from matplotlib.collections import PolyCollection as _PolyCollection
from siriuspy.devices import BunchbyBunch, PowerSupplyPU, EGTriggerPS
from ..utils import MeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class BbBLParams(_ParamsBaseClass):
"""."""
DAC_NBITS = 14
SAT_THRES = 2**(DAC_NBITS-1) - 1
CALIBRATION_FACTOR = 1000 # Counts/mA/degree
DAMPING_RATE = 1/13.0 # Hz
FREQ_RF = 499666000
HARM_NUM = 864
FREQ_REV = FREQ_RF / HARM_NUM
PER_REV = 1 / FREQ_REV
def __init__(self):
"""."""
super().__init__()
self.center_frequency = 2090 # [Hz]
self.bandwidth = 200 # [Hz]
def __str__(self):
"""."""
ftmp = '{0:24s} = {1:9.3f} {2:s}\n'.format
# dtmp = '{0:24s} = {1:9d} {2:s}\n'.format
st = ftmp('center_frequency [Hz]', self.center_frequency, '')
st += ftmp('bandwidth [Hz]', self.bandwidth, '')
return st
class BbBHParams(BbBLParams):
"""."""
DAMPING_RATE = 1/16.9e-3 # Hz
CALIBRATION_FACTOR = 1000 # Counts/mA/um
class BbBVParams(BbBLParams):
"""."""
DAMPING_RATE = 1/22.0e-3 # Hz
CALIBRATION_FACTOR = 1000 # Counts/mA/um
class BbBData(_BaseClass):
"""."""
DEVICES = BunchbyBunch.DEVICES
def __init__(self, devname):
"""."""
if devname.endswith('L'):
params = BbBLParams()
elif devname.endswith('H'):
params = BbBHParams()
elif devname.endswith('V'):
params = BbBVParams()
super().__init__(params=params)
self.devices['bbb'] = BunchbyBunch(devname)
def get_data(self):
"""Get Raw data to file."""
acqtype = self.params.acqtype
bbb = self.devices['bbb']
acq = bbb.sram if acqtype in 'SRAM' else bbb.bram
cavity_data = dict(
temperature={
'cell1': bbb.rfcav.dev_cavmon.temp_cell1,
'cell2': bbb.rfcav.dev_cavmon.temp_cell2,
'cell3': bbb.rfcav.dev_cavmon.temp_cell3,
'cell4': bbb.rfcav.dev_cavmon.temp_cell4,
'cell5': bbb.rfcav.dev_cavmon.temp_cell5,
'cell6': bbb.rfcav.dev_cavmon.temp_cell6,
'cell7': bbb.rfcav.dev_cavmon.temp_cell7,
'coupler': bbb.rfcav.dev_cavmon.temp_coupler,
},
power={
'cell2': bbb.rfcav.dev_cavmon.power_cell2,
'cell4': bbb.rfcav.dev_cavmon.power_cell4,
'cell6': bbb.rfcav.dev_cavmon.power_cell6,
'forward': bbb.rfcav.dev_cavmon.power_forward,
'reverse': bbb.rfcav.dev_cavmon.power_reverse,
},
voltage=bbb.rfcav.dev_llrf.voltage_mon,
phase=bbb.rfcav.dev_llrf.phase_mon,
detune=bbb.rfcav.dev_llrf.detune,
detune_error=bbb.rfcav.dev_llrf.detune_error,
)
self.data = dict(
stored_current=bbb.dcct.current,
timestamp=_time.time(),
cavity_data=cavity_data,
acqtype=acqtype, downsample=acq.downsample,
rawdata=_np.array(
acq.data_raw.reshape((-1, self.params.HARM_NUM)).T,
dtype=float),
fb_set0=bbb.coeffs.set0, fb_set1=bbb.coeffs.set1,
fb_set0_desc=bbb.coeffs.set0_desc,
fb_set1_desc=bbb.coeffs.set1_desc,
fb_downsample=bbb.feedback.downsample,
fb_state=bbb.feedback.loop_state,
fb_shift_gain=bbb.feedback.shift_gain,
fb_setsel=bbb.feedback.coeff_set,
fb_growdamp_state=bbb.feedback.grow_damp_state,
growth_time=acq.growthtime, acq_time=acq.acqtime,
hold_time=acq.holdtime, post_time=acq.posttime,
)
def load_and_apply_old_data(self, fname):
"""."""
data = self.load_data(fname)
if not isinstance(data['data'], _np.ndarray):
self.load_and_apply(fname)
return
data.pop('rf_freq')
data.pop('harmonic_number')
data['rawdata'] = _np.array(
data.pop('data').reshape((-1, self.params.HARM_NUM)).T,
dtype=float)
data['cavity_data'] = dict(
temperature={
'cell1': 0.0, 'cell2': 0.0, 'cell3': 0.0, 'cell4': 0.0,
'cell5': 0.0, 'cell6': 0.0, 'cell7': 0.0, 'coupler': 0.0},
power={
'cell2': 0.0, 'cell4': 0.0, 'cell6': 0.0, 'forward': 0.0,
'reverse': 0.0},
voltage=0.0, phase=0.0, detune=0.0, detune_error=0.0,
)
self.data = data
def process_data(self, rawdata=None):
"""."""
center_freq = self.params.center_frequency
sigma_freq = self.params.bandwidth
per_rev = self.params.PER_REV
calib = self.params.CALIBRATION_FACTOR
harm_num = self.params.HARM_NUM
downsample = self.data['downsample']
current = self.data.get('stored_current', None)
if current is None:
current = self.data['current']
dtime = per_rev*downsample
if rawdata is None:
dataraw = self.data['rawdata'].copy()
dataraw *= 1 / (calib * current / harm_num)
else:
dataraw = rawdata.copy()
# remove DC component from bunches
dataraw -= dataraw.mean(axis=1)[:, None]
# get the analytic data vector, via discrete hilbert transform
data_anal = _np.array(_scysig.hilbert(dataraw, axis=1))
# calculate DFT:
data_dft = _np.fft.fft(data_anal, axis=1)
freq = _np.fft.fftfreq(data_anal.shape[1], d=dtime)
# Apply Gaussian filter to get only the synchrotron frequency
H = _np.exp(-(freq - center_freq)**2/2/sigma_freq**2)
H += _np.exp(-(freq + center_freq)**2/2/sigma_freq**2)
H /= H.max()
data_dft *= H[None, :]
# compensate the different time samplings of each bunch:
dts = _np.arange(data_anal.shape[0])/data_anal.shape[0] * per_rev
comp = _np.exp(-1j*2*_np.pi * freq[None, :]*dts[:, None])
data_dft *= comp
# get the processed data by inverse DFT
data_anal = _np.fft.ifft(data_dft, axis=1)
data_dft /= data_anal.shape[1]
# decompose data into even fill eigenvectors:
data_modes = _np.fft.fft(data_anal, axis=0) / data_anal.shape[0]
analysis = dict()
analysis['bunch_numbers'] = _np.arange(1, dataraw.shape[0]+1)
analysis['dft_freq'] = freq
analysis['mode_numbers'] = _np.arange(data_modes.shape[0])
analysis['time'] = _np.arange(data_anal.shape[1]) * dtime
analysis['mode_data'] = data_modes
analysis['bunch_data'] = data_anal
analysis['dft_data'] = data_dft
analysis['dft_filter'] = H
if rawdata is None:
self.analysis.update(analysis)
return analysis
def get_dac_output(self, coeff=None, shift_gain=None, saturate=True):
"""."""
if coeff is None:
coeff = self.data[f"fb_set{self.data['fb_setsel']:d}"]
if shift_gain is None:
shift_gain = self.data['fb_shift_gain']
dac_out = _scysig.convolve(
self.data['rawdata'], coeff[None, :], mode='valid')
dac_out *= 2**shift_gain
if saturate:
idcs = dac_out > self.params.SAT_THRES
dac_out[idcs] = self.params.SAT_THRES
idcs = dac_out < -self.params.SAT_THRES
dac_out[idcs] = -self.params.SAT_THRES
return dac_out
def pca_analysis(self, rawdata=None):
"""."""
calib = self.params.CALIBRATION_FACTOR
harm_num = self.params.HARM_NUM
current = self.data.get('stored_current', None)
if current is None:
current = self.data['current']
if rawdata is None:
rawdata = self.data['rawdata'].copy()
rawdata *= 1 / (calib * current / harm_num)
else:
rawdata = rawdata.copy()
rawdata -= rawdata.mean(axis=1)[:, None]
return _np.linalg.svd(rawdata)
def estimate_fitting_intervals(self):
"""."""
tim = self.analysis['time'] * 1000
growth_time = self.data['growth_time']
post_time = self.data['post_time']
change_time = tim[-1] - (post_time - growth_time)
change_time = max(tim[0], min(tim[-1], change_time))
return [[0, change_time], [change_time, tim[-1]]]
def fit_and_plot_grow_rates(
self, mode_num, intervals=None, labels=None, title='',
analysis=None):
"""."""
if analysis is None:
analysis = self.analysis
if intervals is None:
intervals = self.estimate_fitting_intervals()
per_rev = self.params.PER_REV
downsample = self.data['downsample']
dtime = per_rev*downsample
tim = analysis['time'] * 1000
if isinstance(mode_num, _np.ndarray) and mode_num.size > 1:
data_mode = mode_num.copy()
else:
data_mode = analysis['mode_data'][mode_num]
abs_mode = _np.abs(data_mode)
labels = ['']*len(intervals) if labels is None else labels
fig = _mplt.figure(figsize=(7, 8))
gsp = _mgs.GridSpec(2, 1)
gsp.update(left=0.15, right=0.95, top=0.94, bottom=0.1, hspace=0.2)
aty = _mplt.subplot(gsp[0, 0])
atx = _mplt.subplot(gsp[1, 0], sharex=aty)
aty.plot(tim, abs_mode, label='Data')
gtimes = []
for inter, label in zip(intervals, labels):
ini, fin = inter
tim_fit, coef = self.fit_exponential(
tim, abs_mode, t_ini=ini, t_fin=fin)
fit = self.exponential_func(tim_fit, *coef)
gtimes.append(coef[2] * 1000)
aty.plot(tim_fit, fit, label=label)
aty.annotate(
f'rate = {coef[2]*1000:.2f} Hz', fontsize='x-small',
xy=(tim_fit[fit.size//2], fit[fit.size//2]),
textcoords='offset points', xytext=(-100, 10),
arrowprops=dict(arrowstyle='->'),
bbox=dict(boxstyle="round", fc="0.8"))
aty.legend(loc='best', fontsize='small')
aty.set_title(title, fontsize='small')
aty.set_xlabel('time [ms]')
aty.set_ylabel('Amplitude [°]')
idx = abs_mode > abs_mode.max()/10
inst_freq = self.calc_instant_frequency(data_mode, dtime)
inst_freq /= 1e3
atx.plot(tim[idx], inst_freq[idx])
atx.set_xlabel('time [ms]')
atx.set_ylabel('Instantaneous Frequency [kHz]')
cenf = self.params.center_frequency / 1000
sig = self.params.bandwidth / 1000
atx.set_ylim([cenf - sig, cenf + sig])
fig.show()
return fig, aty, atx, gtimes
def plot_modes_evolution(self, nr_modes=5, title='', analysis=None):
"""."""
if analysis is None:
analysis = self.analysis
data_modes = analysis['mode_data']
abs_modes = _np.abs(data_modes)
tim = analysis['time'] * 1000
per_rev = self.params.PER_REV
downsample = self.data['downsample']
dtime = per_rev*downsample
avg_modes = abs_modes.max(axis=1)
indcs = _np.argsort(avg_modes)[::-1]
indcs = indcs[:nr_modes]
fig = _mplt.figure(figsize=(7, 10))
gsp = _mgs.GridSpec(3, 1)
gsp.update(left=0.15, right=0.95, top=0.96, bottom=0.07, hspace=0.23)
ax = _mplt.subplot(gsp[0, 0])
aty = _mplt.subplot(gsp[1, 0])
atx = _mplt.subplot(gsp[2, 0], sharex=aty)
ax.plot(avg_modes, 'k')
for idx in indcs:
data = data_modes[idx, :]
abs_mode = abs_modes[idx, :]
ax.plot(idx, avg_modes[idx], 'o')
aty.plot(tim, abs_mode, label=f'{idx:03d}')
inst_freq = self.calc_instant_frequency(data, dtime)
nzer = abs_mode > abs_mode.max()/10
atx.plot(tim[nzer], inst_freq[nzer]/1e3, label=f'{idx:03d}')
aty.legend(loc='best', fontsize='small')
ax.set_title(title)
ax.set_ylabel('Max Amplitude [°]')
ax.set_xlabel('Mode Number')
aty.set_xlabel('time [ms]')
aty.set_ylabel('Amplitude [°]')
atx.set_xlabel('time [ms]')
atx.set_ylabel('Instantaneous Frequency [kHz]')
cenf = self.params.center_frequency / 1000
sig = self.params.bandwidth / 1000
atx.set_ylim([cenf - sig, cenf + sig])
fig.show()
return fig, ax, aty, atx
def plot_average_spectrum(self, rawdata=None, subtract_mean=True):
"""."""
if rawdata is None:
rawdata = self.data['rawdata']
rawdata = rawdata.copy()
per_rev = self.params.PER_REV
downsample = self.data['downsample']
dtime = per_rev*downsample
if subtract_mean:
rawdata -= rawdata.mean(axis=1)[:, None]
dataraw_dft = _np.fft.rfft(rawdata, axis=1)
rfreq = _np.fft.rfftfreq(rawdata.shape[1], d=dtime)
avg_dataraw = _np.abs(dataraw_dft).mean(axis=0)/rawdata.shape[1]
f = _mplt.figure(figsize=(7, 4))
gs = _mgs.GridSpec(1, 1)
gs.update(
left=0.15, right=0.95, top=0.97, bottom=0.18, wspace=0.35,
hspace=0.2)
aty = _mplt.subplot(gs[0, 0])
aty.plot(rfreq, avg_dataraw)
aty.set_yscale('log')
f.show()
return f, aty
def plot_modes_summary(self, analysis=None):
"""."""
if analysis is None:
analysis = self.analysis
data_modes = analysis['mode_data']
data_anal = analysis['bunch_data']
tim = analysis['time'] * 1000
mode_nums = analysis['mode_numbers']
bunch_nums = analysis['bunch_numbers']
f = _mplt.figure(figsize=(12, 9))
gs = _mgs.GridSpec(2, 2)
gs.update(
left=0.10, right=0.95, top=0.97, bottom=0.10, wspace=0.35,
hspace=0.2)
# aty = _mplt.subplot(gs[0, :7], projection='3d')
# afy = _mplt.subplot(gs[1, :7], projection='3d')
aty = _mplt.subplot(gs[0, 0])
afy = _mplt.subplot(gs[1, 0])
atx = _mplt.subplot(gs[0, 1])
afx = _mplt.subplot(gs[1, 1])
abs_modes = _np.abs(data_modes)
abs_dataf = _np.abs(data_anal)
afx.plot(mode_nums, abs_modes.mean(axis=1))
afx.set_xlabel('Mode Number')
afx.set_ylabel('Average Amplitude [°]')
# afx.set_yscale('log')
# waterfall_plot(afy, tim, mode_nums, abs_modes)
# afy.set_ylabel('\ntime [ms]')
# afy.set_xlabel('\nmode number')
# afy.set_zlabel('amplitude')
T, M = _np.meshgrid(tim, mode_nums)
cf = afy.pcolormesh(
T, M, abs_modes, cmap='jet',
vmin=abs_modes.min(), vmax=abs_modes.max())
afy.set_xlabel('Time [ms]')
afy.set_ylabel('Mode Number')
cb = f.colorbar(cf, ax=afy, pad=0.01)
cb.set_label('Amplitude [°]')
atx.plot(bunch_nums, abs_dataf.mean(axis=1))
atx.set_xlabel('Bunch Number')
atx.set_ylabel('Average Amplitude [°]')
# waterfall_plot(aty, tim, bunch_nums, abs_dataf)
# aty.set_ylabel('\ntime [ms]')
# aty.set_xlabel('\nbunch number')
# aty.set_zlabel('amplitude')
T, M = _np.meshgrid(tim, bunch_nums)
cf = aty.pcolormesh(
T, M, abs_dataf, cmap='jet',
vmin=abs_dataf.min(), vmax=abs_dataf.max())
aty.set_xlabel('Time [ms]')
aty.set_ylabel('Bunch Number')
cb = f.colorbar(cf, ax=aty, pad=0.01)
cb.set_label('Amplitude [°]')
f.show()
return f
@classmethod
def fit_exponential(cls, times, data, t_ini=None, t_fin=None, offset=True):
"""Fit exponential function."""
t_ini = t_ini or times.min()
t_fin = t_fin or times.max()
idx = (times >= t_ini) & (times <= t_fin)
tim = times[idx]
dtim = data[idx]
# Exponential function without offset
if not offset:
log_amp, rate = _np.polynomial.polynomial.polyfit(
tim, _np.log(dtim), deg=1, rcond=None)
return tim, (0, _np.exp(log_amp), rate)
# method to estimate fitting parameters of
# y = a + b*exp(c*x)
# based on:
# https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
# pages 16-18
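        # Editor's note (added sketch of the idea, not in the original): integrating
        # y(x) = a + b*exp(c*x) from x0 = tim[0] gives
        #   S(x) = a*(x - x0) + (b/c)*(exp(c*x) - exp(c*x0)),
        # and since y(x) - y(x0) = b*(exp(c*x) - exp(c*x0)) = c*(S(x) - a*(x - x0)),
        # regressing y - y(x0) on the columns [x - x0, S] yields c as the coefficient of S;
        # with c fixed, the offset and amplitude follow from the ordinary linear fit below.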
s = _scyint.cumtrapz(dtim, x=tim, initial=0.0)
ym = dtim - dtim[0]
xm = tim - tim[0]
mat = _np.array([xm, s]).T
(_, rate), *_ = _np.linalg.lstsq(mat, ym, rcond=None)
theta = _np.exp(rate*tim)
mat = _np.ones((theta.size, 2))
mat[:, 1] = theta
(off, amp), *_ = _np.linalg.lstsq(mat, dtim, rcond=None)
# Now use scipy to refine the estimatives:
coefs = (off, amp, rate)
try:
coefs, _ = _scyopt.curve_fit(
cls.exponential_func, tim, dtim, p0=coefs)
except RuntimeError:
pass
return tim, coefs
@staticmethod
def exponential_func(tim, off, amp, rate):
"""Return exponential function with offset."""
return off + amp*_np.exp(rate*tim)
@staticmethod
def calc_instant_frequency(data, dtime):
"""."""
return _np.gradient(_np.unwrap(_np.angle(data)))/(2*_np.pi*dtime)
@staticmethod
def waterfall_plot(axis, xs, zs, data):
"""."""
vertcs, colors = [], []
cors = ['b', 'r', 'g', 'y', 'm', 'c']
for i, y in enumerate(zs):
ys = data[i, :].copy()
ys[0], ys[-1] = 0, 0
vertcs.append(list(zip(xs, ys)))
colors.append(cors[i % len(cors)])
poly = _PolyCollection(
vertcs, closed=False, edgecolors='k',
linewidths=1, facecolors=colors)
poly.set_alpha(0.7)
axis.add_collection3d(poly, zs=zs, zdir='x')
axis.view_init(elev=35.5, azim=-135)
axis.set_ylim3d(xs.min(), xs.max())
axis.set_xlim3d(zs.min(), zs.max())
axis.set_zlim3d(0, data.max())
class TuneShiftParams(_ParamsBaseClass):
"""."""
TIME_REV = 864 / 499666000 # s
WAIT_INJ = 0.2 # s
DEF_TOL_CURRENT = 0.01 # mA
def __init__(self):
"""."""
super().__init__()
self.plane = 'HV' # 'H', 'V' or 'HV'
self.kickh = -25/1000 # mrad
self.kickv = +20/1000 # mrad
self.wait_bbb = 9 # s
self.currents = list()
self.filename = ''
def __str__(self):
"""."""
dtmp = '{0:10s} = {1:9d} {2:s}\n'.format
ftmp = '{0:10s} = {1:9.3f} {2:s}\n'.format
ltmp = '{0:6.3f},'.format
stg = f"{'plane':10s} = {self.plane:4s} ('H', 'V' or 'HV')\n"
stg += ftmp('kickh', self.kickh, '[mrad]')
stg += ftmp('kickv', self.kickv, '[mrad]')
stg += dtmp('wait_bbb', self.wait_bbb, '[s]')
stg += f"{'currents':10s} = ("
stg += ''.join(map(ltmp, self.currents))
stg += ' ) mA \n'
stg += f"{'filename':10s} = '{self.filename:s}'\n"
return stg
class MeasTuneShift(_BaseClass):
"""."""
def __init__(self, isonline=True):
"""."""
self.devices = dict()
self.data = dict()
self.params = TuneShiftParams()
self.pingers = list()
if isonline:
self.devices['bbbh'] = BunchbyBunch(BunchbyBunch.DEVICES.H)
self.devices['bbbv'] = BunchbyBunch(BunchbyBunch.DEVICES.V)
self.devices['pingh'] = PowerSupplyPU(
PowerSupplyPU.DEVICES.SI_INJ_DPKCKR)
self.devices['pingv'] = PowerSupplyPU(
PowerSupplyPU.DEVICES.SI_PING_V)
self.devices['egun'] = EGTriggerPS()
self.pingers = [self.devices['pingh'], self.devices['pingv']]
def get_data(self, plane):
"""."""
bbbtype = 'bbbh' if plane.upper() == 'H' else 'bbbv'
bbb = self.devices[bbbtype]
sb_tune = bbb.single_bunch
data = {
'timestamp': _time.time(),
'stored_current': bbb.dcct.current,
'data': sb_tune.data_raw,
'spec_mag': sb_tune.spec_mag, 'spec_freq': sb_tune.spec_freq,
'spec_phase': sb_tune.spec_phase,
'bunch_id': sb_tune.bunch_id, 'fft_size': sb_tune.fft_size,
'fft_overlap': sb_tune.fft_overlap,
'delay_calibration': sb_tune.delay_calibration,
'nr_averages': sb_tune.nr_averages}
return data
    @staticmethod
    def merge_data(data1, data2):
"""."""
if data1.keys() != data2.keys():
raise Exception('Incompatible data sets')
merge = dict()
for key in data1:
merge[key] = data1[key] + data2[key]
return merge
def turn_on_pingers_pulse(self):
"""."""
for ping in self.pingers:
ping.cmd_turn_on_pulse()
def turn_off_pingers_pulse(self):
"""."""
for ping in self.pingers:
ping.cmd_turn_off_pulse()
def turn_on_pingers(self):
"""."""
for ping in self.pingers:
ping.cmd_turn_on()
def turn_off_pingers(self):
"""."""
for ping in self.pingers:
ping.cmd_turn_off()
def prepare_pingers(self):
"""."""
self.devices['pingh'].strength = self.params.kickh
self.devices['pingv'].strength = self.params.kickv
self.turn_on_pingers()
self.turn_on_pingers_pulse()
def inject_in_storage_ring(self, goal_curr):
"""."""
self.devices['egun'].cmd_enable_trigger()
while not self._check_stored_current(goal_curr):
_time.sleep(TuneShiftParams.WAIT_INJ)
curr = self.devices['bbbh'].dcct.current
print(
f'Stored Current: {curr:.3f}/{goal_curr:.3f}mA.')
self.devices['egun'].cmd_disable_trigger()
def _check_stored_current(
self, goal_curr, tol=TuneShiftParams.DEF_TOL_CURRENT):
dcct_curr = self.devices['bbbh'].dcct.current
return dcct_curr > goal_curr or abs(dcct_curr - goal_curr) < tol
def _check_pingers_problem(self):
for ping in self.pingers:
if ping.voltage_mon < 0:
# reset pinger
ping.cmd_turn_off()
ping.cmd_turn_on()
return True
return False
def run_meas(self, save=True):
"""."""
data = dict()
datah = list()
datav = list()
currs = list()
for curr in self.params.currents:
t0 = _time.time()
self.inject_in_storage_ring(curr)
trial = 0
while self._check_pingers_problem():
if trial > 2:
                    print('3 unsuccessful reset trials. Exiting...')
break
print('Problem with pingers voltage. Resetting...')
_time.sleep(5)
trial += 1
_time.sleep(self.params.wait_bbb)
print('Acquiring data...')
currs.append(self.devices['bbbh'].dcct.current)
data['stored_current'] = currs
if 'H' in self.params.plane:
datah.append(self.get_data(plane='H'))
data['horizontal'] = datah
if 'V' in self.params.plane:
datav.append(self.get_data(plane='V'))
data['vertical'] = datav
self.data = data
if save:
self.save_data(fname=self.params.filename, overwrite=True)
print('Data saved!')
tf = _time.time()
print(f'Elapsed time: {tf-t0:.2f}s \n')
def plot_spectrum(
self, plane, freq_min=None, freq_max=None,
title=None, fname=None):
"""plane: must be 'H' or 'V'."""
if plane.upper() == 'H':
data = self.data['horizontal']
freq_min = freq_min or 38
freq_max = freq_max or 52
elif plane.upper() == 'V':
data = self.data['vertical']
freq_min = freq_min or 72
freq_max = freq_max or 84
else:
raise Exception("plane input must be 'H' or 'V'.")
curr = _np.array(self.data['stored_current'])
mag = [dta['spec_mag'] for dta in data]
mag = _np.array(mag, dtype=float)
freq = _np.array(data[-1]['spec_freq'])
idcs = (freq > freq_min) & (freq < freq_max)
freq = freq[idcs]
idx = _np.argsort(curr)
curr = curr[idx]
mag = mag[idx, :]
mag = mag[:, idcs]
freq, curr = _np.meshgrid(freq, curr)
freq, curr, mag = freq.T, curr.T, mag.T
fig = _mplt.figure(figsize=(8, 6))
gs = _mgs.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.pcolormesh(curr, freq, mag)
ax.set_ylabel('Frequency [kHz]')
ax.set_xlabel('Current [mA]')
ax.set_title(title)
if fname:
fig.savefig(fname, format='png', dpi=300)
return fig
def plot_time_evolution(
self, plane, title=None, fname=None):
"""plane: must be 'H' or 'V'."""
if plane.upper() == 'H':
data = self.data['horizontal']
elif plane.upper() == 'V':
data = self.data['vertical']
else:
raise Exception("plane input must be 'H' or 'V'.")
curr = _np.array(self.data['stored_current'])
mag = [dta['spec_mag'] for dta in data]
mag = _np.array(mag, dtype=float)
mag -= _np.mean(mag, axis=1)[:, None]
mag = _np.abs(mag)
dtime = _np.arange(0, mag.shape[1]) * TuneShiftParams.TIME_REV
idx = _np.argsort(curr)
curr = curr[idx]
mag = mag[idx, :]
dtime, curr = _np.meshgrid(dtime, curr)
dtime, curr, mag = dtime.T, curr.T, mag.T
fig = _mplt.figure(figsize=(8, 6))
gs = _mgs.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.pcolormesh(curr, dtime * 1e3, mag)
ax.set_ylabel('Time [ms]')
ax.set_xlabel('Current [mA]')
ax.set_title(title)
if fname:
fig.savefig(fname, format='png', dpi=300)
return fig
|
mit
|
ujjwalkarn/DataSciencePython
|
Logistic-Regression/classifier_corrected.py
|
1
|
3736
|
#https://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/4797/starter-code-in-python-with-scikit-learn-auc-885
""" Amazon Access Challenge Starter Code
These files provide some starter code using
the scikit-learn library. It provides some examples on how
to design a simple algorithm, including pre-processing,
training a logistic regression classifier on the data,
assessing its performance through cross-validation, and some
pointers on where to go next.
Paul Duan <[email protected]>
"""
from __future__ import division
import numpy as np
from sklearn import (metrics, cross_validation, linear_model, preprocessing)
SEED = 42 # always use a seed for randomized procedures
def load_data(filename, use_labels=True):
"""
Load data from CSV files and return them as numpy arrays
The use_labels parameter indicates whether one should
read the first column (containing class labels). If false,
return all 0s.
"""
# load column 1 to 8 (ignore last one)
data = np.loadtxt(open("data/" + filename), delimiter=',',
usecols=range(1, 9), skiprows=1)
if use_labels:
labels = np.loadtxt(open("data/" + filename), delimiter=',',
usecols=[0], skiprows=1)
else:
labels = np.zeros(data.shape[0])
return labels, data
def save_results(predictions, filename):
"""Given a vector of predictions, save results in CSV format."""
with open(filename, 'w') as f:
f.write("id,ACTION\n")
for i, pred in enumerate(predictions):
f.write("%d,%f\n" % (i + 1, pred))
def main():
"""
Fit models and make predictions.
We'll use one-hot encoding to transform our categorical features
into binary features.
y and X will be numpy array objects.
"""
model = linear_model.LogisticRegression(C=3) # the classifier we'll use
# === load data in memory === #
print "loading data"
y, X = load_data('train.csv')
y_test, X_test = load_data('test.csv', use_labels=False)
# === one-hot encoding === #
# we want to encode the category IDs encountered both in
# the training and the test set, so we fit the encoder on both
encoder = preprocessing.OneHotEncoder()
encoder.fit(np.vstack((X, X_test)))
    X = encoder.transform(X)  # Returns a sparse matrix (see scipy.sparse)
X_test = encoder.transform(X_test)
# if you want to create new features, you'll need to compute them
# before the encoding, and append them to your dataset after
# === training & metrics === #
mean_auc = 0.0
n = 10 # repeat the CV procedure 10 times to get more precise results
for i in range(n):
# for each iteration, randomly hold out 20% of the data as CV set
X_train, X_cv, y_train, y_cv = cross_validation.train_test_split(
X, y, test_size=.20, random_state=i*SEED)
# if you want to perform feature selection / hyperparameter
# optimization, this is where you want to do it
# train model and make predictions
model.fit(X_train, y_train)
preds = model.predict_proba(X_cv)[:, 1]
# compute AUC metric for this CV fold
fpr, tpr, thresholds = metrics.roc_curve(y_cv, preds)
roc_auc = metrics.auc(fpr, tpr)
print "AUC (fold %d/%d): %f" % (i + 1, n, roc_auc)
mean_auc += roc_auc
print "Mean AUC: %f" % (mean_auc/n)
# === Predictions === #
# When making predictions, retrain the model on the whole training set
model.fit(X, y)
preds = model.predict_proba(X_test)[:, 1]
filename = raw_input("Enter name for submission file: ")
save_results(preds, filename + ".csv")
if __name__ == '__main__':
main()
|
mit
|
stylianos-kampakis/scikit-learn
|
examples/ensemble/plot_voting_decision_regions.py
|
230
|
2386
|
"""
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
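# Editor's sketch (illustrative assumption, not part of the original example): with soft voting,
# the ensemble averages the per-class probabilities using the given weights, so for the first
# sample the manual weighted average below should match eclf.predict_proba(X[:1]) up to
# floating-point error.
probas = [clf.predict_proba(X[:1]) for clf in (clf1, clf2, clf3)]
manual_soft_vote = (2 * probas[0] + 1 * probas[1] + 2 * probas[2]) / 5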
|
bsd-3-clause
|
linebp/pandas
|
pandas/tests/frame/test_operators.py
|
7
|
43594
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import operator
import pytest
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.io.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(TestData):
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
# Test for issue #10181
for dtype in ('float', 'int64'):
frames = [
DataFrame(dtype=dtype),
DataFrame(columns=['A'], dtype=dtype),
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
assert (df + df).equals(df)
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
assert_frame_equal(result, DataFrame(index=[1, 2]))
result = DataFrame(index=[1], columns=['A']) & DataFrame(
index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
# boolean ops
result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
def f():
DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def f():
DataFrame('foo', index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be
# object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
pytest.raises(TypeError, lambda: x == y)
pytest.raises(TypeError, lambda: x != y)
pytest.raises(TypeError, lambda: x >= y)
pytest.raises(TypeError, lambda: x > y)
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong as the integer portion is coerced to float
# ###
expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'),
'second': Series([np.nan, np.nan, np.nan, 0])})
result = p % p
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values % p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
assert_frame_equal(result2, expected)
result = p % 0
expected = DataFrame(np.nan, index=p.index, columns=p.columns)
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') % 0
result2 = DataFrame(arr, index=p.index, columns=p.columns)
assert_frame_equal(result2, expected)
# not commutative with series
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
assert not np.array_equal(res.fillna(0), res2.fillna(0))
def test_div(self):
# integer div, but deal with the 0's (GH 9144)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p / p
expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
'second': Series([nan, nan, nan, 1])})
assert_frame_equal(result, expected)
with np.errstate(all='ignore'):
arr = p.values.astype('float') / p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
result = p / 0
expected = DataFrame(np.inf, index=p.index, columns=p.columns)
expected.iloc[0:3, 1] = nan
assert_frame_equal(result, expected)
# numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') / 0
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s / p
res2 = p / s
assert not np.array_equal(res.fillna(0), res2.fillna(0))
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
def test_logical_typeerror(self):
if not compat.PY3:
pytest.raises(TypeError, self.frame.__eq__, 'foo')
pytest.raises(TypeError, self.frame.__lt__, 'foo')
pytest.raises(TypeError, self.frame.__gt__, 'foo')
pytest.raises(TypeError, self.frame.__ne__, 'foo')
else:
pytest.skip('test_logical_typeerror not tested on PY3')
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(
2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(
2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
except:
printing.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
msg = "Unable to coerce to Series/DataFrame"
with tm.assert_raises_regex(ValueError, msg):
f(self.frame, ndim_5)
with tm.assert_raises_regex(ValueError, msg):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], fill_value=3)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sort_index()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sort_index()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sort_index())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with tm.assert_raises_regex(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_return_dtypes_bool_op_constant(self):
# GH15077
df = DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
# not empty DataFrame
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
result = getattr(df, op)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([2], ['bool']))
# empty DataFrame
empty = df.iloc[:0]
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
result = getattr(empty, op)(const).get_dtype_counts()
tm.assert_series_equal(result, Series([2], ['bool']))
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].valid().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].valid(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# vs mix (upcast) as needed
added = self.mixed_float + series
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
# these raise with numexpr, as we are adding an int64 to a uint64 (weird)
# vs int:
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
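# (a bare `self.tsframe + ts` would align the Series index against the
# frame's columns rather than its rows, producing NaN columns, so row-wise
# alignment has to be requested explicitly via add(..., axis='index'))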
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == 'A'
else:
assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
tm.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
result = self.frame * 2
tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
assert result.index is self.empty.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with tm.assert_raises_regex(ValueError,
'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
tm.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.loc[1:1, :])
assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.loc[0:0, :])
assert_frame_equal(df[-mask_b], df.loc[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
pytest.raises(TypeError, df.__eq__, None)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
l = (2, 2, 2)
tup = tuple(l)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > l
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, df.__gt__, b_c)
pytest.raises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, lambda: df == b_c)
assert not np.array_equal(df.values, b_c)
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
# not shape compatible
pytest.raises(ValueError, lambda: df == (2, 2))
pytest.raises(ValueError, lambda: df == [2, 2])
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined['D'].isnull().all()
assert combined2['D'].isnull().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
exp = self.frame.loc[self.frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._data is s2._data
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._data is df2._data
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._data is df2._data
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2])]:
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with pytest.raises(ValueError):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
|
bsd-3-clause
|
Avikalp7/image-aesthetics-learning
|
src/Learning/selected_feature.py
|
1
|
15701
|
"""
Constructing the final feature vectors with selected features from the initial 56
along with RAG cut features.
"""
from __future__ import division
from scipy import misc
import numpy as np
from skimage import color
from skimage import data
import os
import PIL
from PIL import Image
from pywt import wavedec2
from sklearn.cluster import KMeans
from disjoint_sets import Graph
# from disjoint_sets import countIslands
global IH, IS, IV, path, image_sizes
global LH, HL, HH, S1, S2, S3
global _f10, _f11, _f12, _f13, _f14, _f15, _f16, _f17, _f18
# Parameter K for Kmeans is set here
kmeans_cluster_num = 12
# Some images (b/w) give zero values on S1, S2, S3 - leading to division by zero
def check_zero(epsilon = 50):
global S1, S2, S3
if S1 == 0:
S1 = epsilon
if S2 == 0:
S2 = epsilon
if S3 == 0:
S3 = epsilon
# Prerequisite for features _f10,11,12: calculating LL, LH, HL, HH for the 3-level 2-D Discrete Wavelet Transform of the Hue channel
def prereq_f10_f11_f12(i):
global S1, S2, S3, LH, HL, HH
# use three separate lists; a chained assignment (HL = LH = HH = [0]*3) would make them aliases of the same list
LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
coeffs = wavedec2(IH[i], 'db1', level = 3)
LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
# print('S1, S2, S3',S1, S2, S3)
check_zero()
# Prerequisite for features _f13,14,15: calculating LL, LH, HL, HH for the 3-level 2-D Discrete Wavelet Transform of the Saturation channel
def prereq_f13_f14_f15(i):
global S1, S2, S3, LH, HL, HH
LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
coeffs = wavedec2(IS[i], 'db1', level = 3)
LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
check_zero()
# Prerequisite for features _f16,17,18: calculating LL, LH, HL, HH for the 3-level 2-D Discrete Wavelet Transform of the Intensity channel
def prereq_f16_f17_f18(i):
global S1, S2, S3, LH, HL, HH
LH, HL, HH = [0] * 3, [0] * 3, [0] * 3
coeffs = wavedec2(IV[i], 'db1', level = 3)
LL, (HL[2], LH[2], HH[2]), (HL[1], LH[1], HH[1]), (HL[0], LH[0], HH[0]) = coeffs
S1 = sum(sum(abs(LH[0]))) + sum(sum(abs(HL[0]))) + sum(sum(abs(HH[0])))
S2 = sum(sum(abs(LH[1]))) + sum(sum(abs(HL[1]))) + sum(sum(abs(HH[1])))
S3 = sum(sum(abs(LH[2]))) + sum(sum(abs(HL[2]))) + sum(sum(abs(HH[2])))
check_zero()
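# Note on the wavedec2 output unpacked above: for level=3, pywt returns
# [cA3, (cH3, cV3, cD3), (cH2, cV2, cD2), (cH1, cV1, cD1)] - the approximation
# first, then detail triples from the coarsest level down to the finest - so
# S1, S2 and S3 sum the absolute detail coefficients of the finest, middle and
# coarsest level respectively.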
def segmentation(graph):
row = len(graph)
col = len(graph[0])
g = Graph(row, col, graph)
dic = {}
for cluster_num in range(kmeans_cluster_num):
# print ("Number of points in cluster number", cluster_num, "is: ")
dic[cluster_num] = g.countIslands(cluster_num)
# print('Len pathces = ', len(dic[cluster_num][1]), ' Len lis = ', len(dic[cluster_num][0]))
# print('i, BLOB_COUNT = ', i, blob_count)
# print('Ending K-Means')
return dic
def segments(dic):
all_lengths = []
all_patches = []
for key in dic:
all_lengths += dic[key][0]
all_patches += dic[key][1]
# print (len(all_lengths), len(all_patches))
all_lengths = np.array(all_lengths)
all_patches = np.array(all_patches)
max_5_indices = all_lengths.argsort()[-5:][::-1] # np.array
return all_patches[max_5_indices]
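# Shape of the data flowing through the two helpers above (as used below):
# `segmentation` returns a dict mapping each k-means label to a pair
# (patch_sizes, patches), where a patch is a list of (row, col) pixel
# coordinates of one connected region; `segments` pools all patches and keeps
# the five largest, on which the f26-f52 patch features are computed.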
# Exposure of Light
def f1(i):
return sum(sum(IV[i]))/(IV[i].shape[0] * IV[i].shape[1])
# Average Saturation / Saturation Indicator
def f3(i):
return sum(sum(IS[i]))/(IS[i].shape[0] * IS[i].shape[1])
# Average Hue / Hue Indicator
def f4(i):
return sum(sum(IH[i]))/(IH[i].shape[0] * IH[i].shape[1])
# Average hue in inner rectangle for rule of thirds inference
def f5(i):
X = IH[i].shape[0]
Y = IH[i].shape[1]
return sum(sum(IH[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * 9 / (X * Y)
# Average saturation in inner rectangle for rule of thirds inference
def f6(i):
X = IS[i].shape[0]
Y = IS[i].shape[1]
return sum(sum(IS[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * (9/(X * Y))
# Average V in inner rectangle for rule of thirds inference
def f7(i):
X = IV[i].shape[0]
Y = IV[i].shape[1]
return sum(sum(IV[i, int(X/3) : int(2*X/3), int(Y/3) : int(2*Y/3)])) * (9/(X * Y))
# Spacial Smoothness of first level of Hue property
def f10(i):
global _f10
prereq_f10_f11_f12(i)
_f10 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f10
# Spacial Smoothness of second level of Hue property
def f11(i):
global _f11
_f11 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f11
# Spacial Smoothness of third level of Hue property
def f12(i):
global _f12
_f12 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f12
# Spacial Smoothness of first level of Saturation property
def f13(i):
global _f13
prereq_f13_f14_f15(i)
_f13 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f13
# Spacial Smoothness of second level of Saturation property
def f14(i):
global _f14
_f14 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f14
# Spacial Smoothness of third level of Saturation property
def f15(i):
global _f15
_f15 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f15
# Spacial Smoothness of first level of Intensity property
def f16(i):
global _f16
prereq_f16_f17_f18(i)
_f16 = (1/S1)*(sum(sum(HH[0])) + sum(sum(HL[0])) + sum(sum(LH[0])))
return _f16
# Spacial Smoothness of second level of Intensity property
def f17(i):
global _f17
_f17 = (1/S2)*(sum(sum(HH[1])) + sum(sum(HL[1])) + sum(sum(LH[1])))
return _f17
# Spacial Smoothness of third level of Intensity property
def f18(i):
global _f18
_f18 = (1/S3)*(sum(sum(HH[2])) + sum(sum(HL[2])) + sum(sum(LH[2])))
return _f18
# Sum of the average wavelet coefficients over all three frequency levels of Hue property
def f19(i):
return _f10 + _f11 + _f12
# Sum of the average wavelet coefficients over all three frequency levels of Saturation property
def f20(i):
return _f13 + _f14 + _f15
# Sum of the average wavelet coefficients over all three frequency levels of Intensity property
def f21(i):
return _f16 + _f17 + _f18
# Image Size feature
def f22(i):
return image_sizes[i][0] + image_sizes[i][1]
# Aspect Ratio Feature
def f23(i):
return image_sizes[i][0] / float(image_sizes[i][1])
# Number of patches > XY/100 pixels, how many disconnected significantly large regions are present
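# (the 164-pixel threshold used below assumes 128x128 inputs: 128*128/100 ~ 164)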
def f24(i, s):
count = 0
for si in s:
if len(si) >= 164:
count += 1
return count
# Number of different color blobs / color complexity of image
def f25(i, dic):
count = 0
for key in dic:
max_length = max(dic[key][0])
if max_length > 1000:
count += 1
return count
# Average Hue value for patch 1
def f26(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 2
def f27(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 3
def f28(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 4
def f29(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Hue value for patch 5
def f30(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IH[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 1
def f31(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 2
def f32(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 3
def f33(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 4
def f34(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Saturation value for patch 5
def f35(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IS[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 1
def f36(i, s):
si = s[0]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 2
def f37(i, s):
si = s[1]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 3
def f38(i, s):
si = s[2]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 4
def f39(i, s):
si = s[3]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# Average Intensity value for patch 5
def f40(i, s):
si = s[4]
sum_ = 0
for pixel in si:
j, k = pixel
sum_ += IV[i][j][k]
return sum_/len(si)
# f41-f45: relative size of each of the five largest patches, as a fraction of the 128*128 = 16384 image pixels
def f41(i):
si = s[0]
return len(si)/16384
def f42(i):
si = s[1]
return len(si)/16384
def f43(i):
si = s[2]
return len(si)/16384
def f44(i):
si = s[3]
return len(si)/16384
def f45(i):
si = s[4]
return len(si)/16384
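# f46/f47: hue-contrast features over the five largest patches, computed from
# the per-patch hue sums h built in the main loop; f47 additionally treats hue
# as a circular quantity (values assumed to lie in [0, 1], scaled to degrees)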
def f46(i, h):
sumh = 0
for j in range(5):
for k in range(5):
sumh += abs(h[j] - h[k])
return sumh
def f47(i, h):
sumh = 0
for j in range(5):
for k in range(5):
t = abs(h[j] - h[k])
if t < 0.5:
sumh += 360*t
else:
sumh += 360 - 360*t
return sumh
def f48_pre(i, s):
centers = []
for si in s:
point_sum_x = 0
point_sum_y = 0
for point in si:
x, y = point
point_sum_x += x
point_sum_y += y
x = point_sum_x/len(si)
y = point_sum_y/len(si)
centers.append([x,y])
return centers
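# f48-f52 map the centroid of each of the five largest patches onto a 3x3 grid
# (a rule-of-thirds style cell code): the tens digit 10/20/30 encodes the row
# band and the units digit 1/2/3 the column band, with the 43/86 cut points
# assuming roughly 128x128 images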
def f48(i, s):
centers = f48_pre(i, s)
n = 0
c = centers[n]
if c[0] < 43:
r = 10
elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f49(i, s):
centers = f48_pre(i, s)
n = 1
c = centers[n]
if c[0] < 43:
r = 10
elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f50(i, s):
centers = f48_pre(i, s)
n = 2
c = centers[n]
if c[0] < 43:
r = 10
elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f51(i, s):
centers = f48_pre(i, s)
n = 3
c = centers[n]
if c[0] < 43:
r = 10
elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
def f52(i, s):
centers = f48_pre(i, s)
n = 4
c = centers[n]
if c[0] < 43:
r = 10
elif c[0] < 86:
r = 20
else:
r = 30
if c[1] < 43:
cc = 1
elif c[1] < 86:
cc = 2
else:
cc = 3
return r + cc
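# The three DoF (depth-of-field) features below reuse the level-3 detail
# coefficients (16x16 maps for 128x128 inputs) and measure, per subband, the
# share of absolute coefficient mass falling in the central [4:12, 4:12]
# block, i.e. how strongly detail is concentrated near the centre of the frame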
# DoF feature for Hue property
def f53(i):
prereq_f10_f11_f12(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
# DoF feature for Saturation property
def f54(i):
prereq_f13_f14_f15(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
# DoF feature for Intensity property
def f55(i):
prereq_f16_f17_f18(i)
v1 = v2 = v3 = 0
sumv1 = sum(sum(LH[2]))
if sumv1 > 0:
v1 = sum(sum(abs(LH[2][4:12,4:12]))) / sumv1
sumv2 = sum(sum(HL[2]))
if sumv2 > 0:
v2 = sum(sum(abs(HL[2][4:12,4:12]))) / sumv2
sumv3 = sum(sum(HH[2]))
if sumv3 > 0:
v3 = sum(sum(abs(HH[2][4:12,4:12]))) / sumv3
if sumv1 == 0:
v1 = (v2 + v3)/2
if sumv2 == 0:
v2 = (v1 + v3)/2
if sumv3 == 0:
v3 = (v1 + v2)/2
return v1 + v2 + v3
path = "/home/avikalp/semester6/SIGIR/photonet_dataset/images/"
if __name__ == '__main__':
# graph = [[1, 1, 0, 0, 0],
# [0, 1, 0, 0, 2],
# [1, 0, 0, 2, 2],
# [0, 0, 0, 0, 0],
# [1, 0, 1, 0, 1]]
# row = len(graph)
# col = len(graph[0])
# g= Graph(row, col, graph)
# k = 0
# print ("Number of islands is :",)
# print(g.countIslands(k))
# exit()
subset_indices = list(np.load('good_indices.npy'))
image_sizes = list(np.load('image_sizes_40p.npy'))
print('Loading IHSV...')
IH = np.load('IH_40p.npy')
IS = np.load('IS_40p.npy')
IV = np.load('IV_40p.npy')
print('IHSV loaded.')
print('Loading LUV...')
LUV = np.load('LUV_40p.npy')
print('LUV loaded.')
feature_vec = []
for i, index in enumerate(subset_indices):
print (i)
feature_vec.append([])
feature_vec[i].append(f1(i))
# feature_vec[i].append(f2(i))
# feature_vec[i].append(f3(i))
# feature_vec[i].append(f4(i))
# feature_vec[i].append(f5(i))
feature_vec[i].append(f6(i))
# feature_vec[i].append(f7(i))
# feature_vec[i].append(f8(i))
# feature_vec[i].append(f9(i))
# feature_vec[i].append(f10(i))
# feature_vec[i].append(f11(i))
# feature_vec[i].append(f12(i))
# f13/f14 are not kept as features, but they run prereq_f13_f14_f15 and set
# _f13/_f14, which f15 and f20 below rely on; likewise f16/f18 set up the
# intensity-channel state needed by f17 and f21
f13(i)
f14(i)
feature_vec[i].append(f15(i))
f16(i)
feature_vec[i].append(f17(i))
f18(i)
# feature_vec[i].append(f19(i))
feature_vec[i].append(f20(i))
feature_vec[i].append(f21(i))
feature_vec[i].append(f22(i))
feature_vec[i].append(f23(i))
# print('Starting K-Means')
# kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
# kmeans.labels_
# kmeans.predict([[0, 0], [4, 4]])
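# Each 128x128 image is flattened to 16384 LUV pixels, clustered into
# kmeans_cluster_num colors, and the label map is reshaped back to 128x128 so
# that connected same-label regions ('patches') can be extracted below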
_LUV = LUV[i].reshape((16384, 3))
kmeans = KMeans(n_clusters=kmeans_cluster_num, random_state=0).fit(_LUV)
# centers = kmeans.cluster_centers_
graph = kmeans.labels_
graph = graph.reshape((128,128))
dic = segmentation(graph)
s = list(segments(dic))
H = []
for k in range(5):
sumh = 0
for i1, j1 in s[k]:
sumh += IH[i][i1][j1]
H.append(sumh)
# feature_vec[i].append(f24(i, s))
feature_vec[i].append(f25(i, dic))
# feature_vec[i].append(f26(i, s))
# feature_vec[i].append(f27(i, s))
feature_vec[i].append(f28(i, s))
# feature_vec[i].append(f29(i, s))
# feature_vec[i].append(f30(i, s))
feature_vec[i].append(f31(i, s))
# feature_vec[i].append(f32(i, s))
# feature_vec[i].append(f33(i, s))
# feature_vec[i].append(f34(i, s))
# feature_vec[i].append(f35(i, s))
# feature_vec[i].append(f36(i, s))
# feature_vec[i].append(f37(i, s))
# feature_vec[i].append(f38(i, s))
# feature_vec[i].append(f39(i, s))
# feature_vec[i].append(f40(i, s))
# feature_vec[i].append(f41(i))
# feature_vec[i].append(f42(i))
feature_vec[i].append(f43(i))
# feature_vec[i].append(f44(i))
# feature_vec[i].append(f45(i))
# feature_vec[i].append(f46(i, H))
# feature_vec[i].append(f47(i, H))
# feature_vec[i].append(f48(i, s))
# feature_vec[i].append(f49(i, s))
# feature_vec[i].append(f50(i, s))
# feature_vec[i].append(f51(i, s))
# feature_vec[i].append(f52(i, s))
# feature_vec[i].append(f53(i))
feature_vec[i].append(f54(i))
# feature_vec[i].append(f55(i))
# feature_vec[i].append(f56(i))
# -------------------------- #
# Do something
#
#
# del feature_vec[i][:]
np.save('../../data/selected_feature_vecs.npy', feature_vec)
|
mit
|
xguse/bokeh
|
bokeh/compat/mplexporter/renderers/base.py
|
44
|
14355
|
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
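# Illustrative sketch (not part of the original module, added only as an
# example of the API above): a concrete exporter mainly has to provide
# draw_path/draw_text/draw_image, since draw_line, draw_markers and
# draw_path_collection all funnel into draw_path by default. The toy
# subclass below simply records the calls it receives.
class _RecordingRenderer(Renderer):
    """Minimal Renderer subclass that collects draw calls for inspection."""
    def __init__(self):
        self.calls = []
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        self.calls.append(("path", len(data), coordinates))
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        self.calls.append(("text", text, position))
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        self.calls.append(("image", extent))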
|
bsd-3-clause
|
wchan/tensorflow
|
tensorflow/examples/skflow/iris_run_config.py
|
3
|
1575
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import skflow
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# You can define your configurations by providing a RunConfig object to the
# estimator to control session configurations, e.g. num_cores and gpu_memory_fraction
run_config = skflow.estimators.RunConfig(num_cores=3, gpu_memory_fraction=0.6)
# Build a 3-layer DNN with 10, 20, 10 units respectively.
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200, config=run_config)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
|
apache-2.0
|
ibis-project/ibis
|
ibis/backends/pyspark/tests/test_basic.py
|
1
|
7855
|
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.common.exceptions as com
from ibis.backends.pyspark.compiler import _can_be_replaced_by_column_name
pytestmark = pytest.mark.pyspark
def test_basic(client):
table = client.table('basic_table')
result = table.compile().toPandas()
expected = pd.DataFrame({'id': range(0, 10), 'str_col': 'value'})
tm.assert_frame_equal(result, expected)
def test_projection(client):
table = client.table('basic_table')
result1 = table.mutate(v=table['id']).compile().toPandas()
expected1 = pd.DataFrame(
{'id': range(0, 10), 'str_col': 'value', 'v': range(0, 10)}
)
result2 = (
table.mutate(v=table['id'])
.mutate(v2=table['id'])
.mutate(id=table['id'] * 2)
.compile()
.toPandas()
)
expected2 = pd.DataFrame(
{
'id': range(0, 20, 2),
'str_col': 'value',
'v': range(0, 10),
'v2': range(0, 10),
}
)
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_aggregation_col(client):
table = client.table('basic_table')
result = table['id'].count().execute()
assert result == table.compile().count()
def test_aggregation(client):
import pyspark.sql.functions as F
table = client.table('basic_table')
result = table.aggregate(table['id'].max()).compile()
expected = table.compile().agg(F.max('id').alias('max'))
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
def test_groupby(client):
import pyspark.sql.functions as F
table = client.table('basic_table')
result = table.groupby('id').aggregate(table['id'].max()).compile()
expected = table.compile().groupby('id').agg(F.max('id').alias('max'))
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
def test_window(client):
import pyspark.sql.functions as F
from pyspark.sql.window import Window
table = client.table('basic_table')
w = ibis.window()
result = table.mutate(
grouped_demeaned=table['id'] - table['id'].mean().over(w)
).compile()
spark_window = Window.partitionBy()
spark_table = table.compile()
expected = spark_table.withColumn(
'grouped_demeaned',
spark_table['id'] - F.mean(spark_table['id']).over(spark_window),
)
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
def test_greatest(client):
table = client.table('basic_table')
result = table.mutate(greatest=ibis.greatest(table.id)).compile()
df = table.compile()
expected = table.compile().withColumn('greatest', df.id)
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
def test_selection(client):
table = client.table('basic_table')
table = table.mutate(id2=table['id'] * 2)
result1 = table[['id']].compile()
result2 = table[['id', 'id2']].compile()
result3 = table[[table, (table.id + 1).name('plus1')]].compile()
result4 = table[[(table.id + 1).name('plus1'), table]].compile()
df = table.compile()
tm.assert_frame_equal(result1.toPandas(), df[['id']].toPandas())
tm.assert_frame_equal(result2.toPandas(), df[['id', 'id2']].toPandas())
tm.assert_frame_equal(
result3.toPandas(),
df[[df.columns]].withColumn('plus1', df.id + 1).toPandas(),
)
tm.assert_frame_equal(
result4.toPandas(),
df.withColumn('plus1', df.id + 1)[['plus1', *df.columns]].toPandas(),
)
def test_join(client):
table = client.table('basic_table')
result = table.join(table, ['id', 'str_col']).compile()
spark_table = table.compile()
expected = spark_table.join(spark_table, ['id', 'str_col'])
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
@pytest.mark.parametrize(
('filter_fn', 'expected_fn'),
[
param(lambda t: t.filter(t.id < 5), lambda df: df[df.id < 5]),
param(lambda t: t.filter(t.id != 5), lambda df: df[df.id != 5]),
param(
lambda t: t.filter([t.id < 5, t.str_col == 'na']),
lambda df: df[df.id < 5][df.str_col == 'na'],
),
param(
lambda t: t.filter((t.id > 3) & (t.id < 11)),
lambda df: df[(df.id > 3) & (df.id < 11)],
),
param(
lambda t: t.filter((t.id == 3) | (t.id == 5)),
lambda df: df[(df.id == 3) | (df.id == 5)],
),
],
)
def test_filter(client, filter_fn, expected_fn):
table = client.table('basic_table')
result = filter_fn(table).compile()
df = table.compile()
expected = expected_fn(df)
tm.assert_frame_equal(result.toPandas(), expected.toPandas())
def test_cast(client):
table = client.table('basic_table')
result = table.mutate(id_string=table.id.cast('string')).compile()
df = table.compile()
df = df.withColumn('id_string', df.id.cast('string'))
tm.assert_frame_equal(result.toPandas(), df.toPandas())
@pytest.mark.parametrize(
'fn',
[
param(lambda t: t.date_str.to_timestamp('yyyy-MM-dd')),
param(lambda t: t.date_str.to_timestamp('yyyy-MM-dd', timezone='UTC')),
],
)
def test_string_to_timestamp(client, fn):
import pyspark.sql.functions as F
table = client.table('date_table')
result = table.mutate(date=fn(table)).compile()
df = table.compile()
expected = df.withColumn(
'date', F.to_date(df.date_str, 'yyyy-MM-dd').alias('date')
)
expected_pdf = expected.toPandas()
expected_pdf['date'] = pd.to_datetime(expected_pdf['date'])
tm.assert_frame_equal(result.toPandas(), expected_pdf)
def test_string_to_timestamp_tz_error(client):
table = client.table('date_table')
with pytest.raises(com.UnsupportedArgumentError):
table.mutate(
date=table.date_str.to_timestamp('yyyy-MM-dd', 'non-utc-timezone')
).compile()
def test_alias_after_select(client):
# Regression test for issue 2136
table = client.table('basic_table')
table = table[['id']]
table = table.mutate(id2=table['id'])
result = table.compile().toPandas()
tm.assert_series_equal(result['id'], result['id2'], check_names=False)
@pytest.mark.parametrize(
('selection_fn', 'selection_idx', 'expected'),
[
# selected column id is selections[0], OK to replace since
# id == t['id'] (straightforward column projection)
(lambda t: t[['id']], 0, True),
# new column v is selections[1], cannot be replaced since it does
# not exist in the root table
(lambda t: t.mutate(v=t['id']), 1, False),
# new column id is selections[0], cannot be replaced since
# new id != t['id']
(lambda t: t.mutate(id=t['str_col']), 0, False),
# new column id is selections[0], OK to replace since
# new id == t['id'] (mutation is no-op)
(lambda t: t.mutate(id=t['id']), 0, True),
# new column id is selections[0], cannot be replaced since
# new id != t['id']
(lambda t: t.mutate(id=t['id'] + 1), 0, False),
# new column id is selections[0], OK to replace since
# new id == t['id'] (relabel is a no-op)
(lambda t: t.relabel({'id': 'id'}), 0, True),
# new column id2 is selections[0], cannot be replaced since
# id2 does not exist in the table
(lambda t: t.relabel({'id': 'id2'}), 0, False),
],
)
def test_can_be_replaced_by_column_name(selection_fn, selection_idx, expected):
table = ibis.table([('id', 'double'), ('str_col', 'string')])
table = selection_fn(table)
selection_to_test = table.op().selections[selection_idx]
result = _can_be_replaced_by_column_name(
selection_to_test, table.op().table
)
assert result == expected
|
apache-2.0
|
jkarnows/scikit-learn
|
examples/mixture/plot_gmm.py
|
248
|
2817
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
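# A rough check (an illustrative addition, not part of the original example):
# the Dirichlet Process model leaves the mixture weights of unneeded
# components close to zero, so counting the non-negligible weights gives its
# effective number of components.
print("DPGMM effective components: %d" % np.sum(dpgmm.weights_ > 1e-2))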
|
bsd-3-clause
|
newemailjdm/scipy
|
scipy/signal/ltisys.py
|
38
|
76123
|
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
#
import warnings
import numpy as np
#np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
#use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
import numpy
from numpy import (r_, eye, real, atleast_1d, atleast_2d, poly,
squeeze, asarray, product, zeros, array,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
from scipy import integrate, interpolate, linalg
from scipy._lib.six import xrange
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim',
'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles']
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return (array([], float), array([], float), array([], float),
array([], float))
    # pad numerator to have same number of columns as denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
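# Illustrative sketch (not part of the original SciPy module): the controller
# canonical matrices for H(s) = (s + 3) / (s**2 + 2*s + 1).  The helper below
# is for demonstration only and is never called by the library code.
def _demo_tf2ss():
    A, B, C, D = tf2ss([1.0, 3.0], [1.0, 2.0, 1.0])
    # Roughly: A = [[-2, -1], [1, 0]], B = [[1], [0]], C = [[1, 3]], D = [0]
    return A, B, C, D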
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
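# Illustrative sketch (not part of the original SciPy module): when A, B and C
# are given, ``abcd_normalize`` infers the missing D as a zero matrix of the
# compatible shape.  Demonstration only; never called by the library code.
def _demo_abcd_normalize():
    A, B, C, D = abcd_normalize(A=[[0., 1.], [-1., -2.]], B=[[0.], [1.]],
                                C=[[1., 0.]])
    # D comes back as a 1x1 zero matrix here.
    return A, B, C, D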
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
B = B[:, input:input + 1]
D = D[:, input:input + 1]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
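# Illustrative sketch (not part of the original SciPy module): round-tripping a
# transfer function through ``tf2ss`` and ``ss2tf`` recovers the same
# polynomials, up to zero-padding of the numerator.  Demonstration only.
def _demo_ss2tf_roundtrip():
    A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])
    num, den = ss2tf(A, B, C, D)
    # Roughly: num = [[0., 0., 1.]] and den = [1., 2., 1.]
    return num, den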
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""
Linear Time Invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies.
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return super(lti, cls).__new__(TransferFunction)
elif N == 3:
return super(lti, cls).__new__(ZerosPolesGain)
elif N == 4:
return super(lti, cls).__new__(StateSpace)
else:
raise ValueError('Needs 2, 3 or 4 arguments.')
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
        Initialize the `lti` base class.
The heavy lifting is done by the subclasses.
"""
self.inputs = None
self.outputs = None
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self.to_tf().num
@num.setter
def num(self, num):
obj = self.to_tf()
obj.num = num
source_class = type(self)
self._copy(source_class(obj))
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self.to_tf().den
@den.setter
def den(self, den):
obj = self.to_tf()
obj.den = den
source_class = type(self)
self._copy(source_class(obj))
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self.to_zpk().zeros
@zeros.setter
def zeros(self, zeros):
obj = self.to_zpk()
obj.zeros = zeros
source_class = type(self)
self._copy(source_class(obj))
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self.to_zpk().poles
@poles.setter
def poles(self, poles):
obj = self.to_zpk()
obj.poles = poles
source_class = type(self)
self._copy(source_class(obj))
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self.to_zpk().gain
@gain.setter
def gain(self, gain):
obj = self.to_zpk()
obj.gain = gain
source_class = type(self)
self._copy(source_class(obj))
@property
def A(self):
"""A matrix of the `StateSpace` system."""
return self.to_ss().A
@A.setter
def A(self, A):
obj = self.to_ss()
obj.A = A
source_class = type(self)
self._copy(source_class(obj))
@property
def B(self):
"""B matrix of the `StateSpace` system."""
return self.to_ss().B
@B.setter
def B(self, B):
obj = self.to_ss()
obj.B = B
source_class = type(self)
self._copy(source_class(obj))
@property
def C(self):
"""C matrix of the `StateSpace` system."""
return self.to_ss().C
@C.setter
def C(self, C):
obj = self.to_ss()
obj.C = C
source_class = type(self)
self._copy(source_class(obj))
@property
def D(self):
"""D matrix of the `StateSpace` system."""
return self.to_ss().D
@D.setter
def D(self, D):
obj = self.to_ss()
obj.D = D
source_class = type(self)
self._copy(source_class(obj))
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `scipy.signal.impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `scipy.signal.step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `scipy.signal.lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `scipy.signal.bode` for details.
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `scipy.signal.freqresp` for details.
"""
return freqresp(self, w=w, n=n)
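# Illustrative sketch (not part of the original SciPy module): instantiating
# ``lti`` directly dispatches to the subclass matching the number of
# arguments, as described in the class docstring above.  Demonstration only.
def _demo_lti_dispatch():
    tf = lti([1.0], [1.0, 1.0])                    # 2 args -> TransferFunction
    zpk = lti([], [-1.0], 1.0)                     # 3 args -> ZerosPolesGain
    ss = lti([[-1.0]], [[1.0]], [[1.0]], [[0.0]])  # 4 args -> StateSpace
    return tf, zpk, ss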
class TransferFunction(lti):
"""Linear Time Invariant system class in transfer function form.
Represents the system as the transfer function
    :math:`H(s)=\sum_i b[i] s^i / \sum_j a[j] s^j`, where :math:`b` are
    elements of the numerator `num` and :math:`a` are the elements of the
    denominator `den`.
Parameters
----------
*system : arguments
The `TransferFunction` class can be instantiated with 1 or 2 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_tf()
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(TransferFunction, self).__init__(*system)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
)
@property
def num(self):
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `StateSpace` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den))
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den))
class ZerosPolesGain(lti):
"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_zpk()
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the zeros, poles, gain LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(ZerosPolesGain, self).__init__(*system)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system"""
return '{0}(\n{1},\n{2},\n{3}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
)
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain))
def to_zpk(self):
"""
        Return a copy of the current `ZerosPolesGain` system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain))
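# Illustrative sketch (not part of the original SciPy module): converting a
# zeros-poles-gain description with a double pole at s = -1 into polynomial
# form.  Demonstration only; never called by the library code.
def _demo_zpk_to_tf():
    sys_zpk = ZerosPolesGain([], [-1.0, -1.0], 1.0)
    sys_tf = sys_zpk.to_tf()
    # Roughly: sys_tf.num = [1.] and sys_tf.den = [1., 2., 1.]
    return sys_tf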
class StateSpace(lti):
"""
Linear Time Invariant system class in state-space form.
Represents the system as the first order differential equation
:math:`\dot{x} = A x + B u`.
Parameters
----------
*system : arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_ss()
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
        super(StateSpace, self).__init__(*system)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def A(self):
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
            Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs))
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs))
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
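# Illustrative sketch (not part of the original SciPy module): the unforced
# response of H(s) = 1 / (s + 1) started at x(0) = 1 decays as exp(-t), and
# ``lsim2`` reproduces this through ``odeint``.  Demonstration only.
def _demo_lsim2_free_decay():
    t = np.linspace(0, 5, 51)
    tout, y, x = lsim2(([1.0], [1.0, 1.0]), T=t, X0=[1.0])
    # y should match np.exp(-t) to within odeint's default tolerances.
    return tout, y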
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None
or (isinstance(U, (int, float)) and U == 0.)
or not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in xrange(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
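# Illustrative sketch (not part of the original SciPy module): the time grid is
# driven by the slowest pole.  For poles at -0.5 and -4 the time constant is
# 2.0, so the samples span [0, 14].  Demonstration only.
def _demo_default_response_times():
    return _default_response_times(np.array([[-0.5, 0.0], [0.0, -4.0]]), 50)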
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
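# Illustrative sketch (not part of the original SciPy module): the impulse
# response of H(s) = 1 / (s + 1) is exp(-t).  Demonstration only.
def _demo_impulse_first_order():
    t, y = impulse(([1.0], [1.0, 1.0]))
    # y should be close to np.exp(-t).
    return t, y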
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
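# Illustrative sketch (not part of the original SciPy module): the step
# response of H(s) = 1 / (s + 1) is 1 - exp(-t).  Demonstration only.
def _demo_step_first_order():
    t, y = step(([1.0], [1.0, 1.0]))
    # y should be close to 1.0 - np.exp(-t).
    return t, y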
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system.to_tf()
else:
sys = lti(*system).to_tf()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
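# Illustrative sketch (not part of the original SciPy module): ``Bunch`` simply
# turns keyword arguments into attributes, which is how ``place_poles``
# packages its results below.  Demonstration only.
def _demo_bunch():
    result = Bunch(gain_matrix=None, rtol=1e-3, nb_iter=0)
    return result.rtol  # -> 1e-3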
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check the poles come in complex conjugate pairs
Check shapes of A, B and poles are compatible.
Check the method chosen is compatible with provided poles
Return update method to use and ordered poles
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check that we have complex conjugate pairs and reorder P according to YT, i.e.
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
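# Illustrative sketch (not part of the original SciPy module): real poles come
# first (sorted), followed by each complex pole immediately before its
# conjugate.  Demonstration only; never called by the library code.
def _demo_order_complex_poles():
    ordered = _order_complex_poles(
        np.array([2.0, -1.0 + 1.0j, 1.0, -1.0 - 1.0j]))
    # Roughly: [1.+0.j, 2.+0.j, -1.-1.j, -1.+1.j]
    return ordered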
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles, using YT technique the two lines
# below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
    # Problems arise when imag(xj) tends to 0; I have no idea how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
# mu1, mu2 two first columns of U => 2 first lines of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
# VM is V.T with numpy we want the first two lines of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
# Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
# ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
# (that's a guess, not a claim !)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
# sort eigenvalues according to their module
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
The poles P have to be sorted accordingly to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the indices in the paper and then subtract one to get numpy
    # array indices; it is a bit easier to link the code to the paper this
    # way, even if it is not very clean. The paper is unclear about what
    # should be done when there is only one real pole => using KNV0 on this
    # real pole seems to work
if nb_real > 0:
#update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
assert ~np.isreal(poles[i]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
    Compute K such that the eigenvalues of ``A - dot(B, K)`` are `poles`.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
    ``A - B*K``, as close as possible to those asked for in `poles`.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
                The closed-loop gain matrix K such that the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
            poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2D ndarray
            The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
            `rtol` will be NaN if the optimisation algorithms cannot run,
            i.e. when ``B.shape[1] == 1``, or 0 when the solution is unique.
nb_iter : int
The number of iterations performed before converging.
            `nb_iter` will be NaN if the optimisation algorithms cannot
            run, i.e. when ``B.shape[1] == 1``, or 0 when the solution
            is unique.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
    (Tits and Yang claim on page 14 of their paper that their method cannot be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
       pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
    # Keep all the input checking out of the way, it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = np.nan
# The number of iterations needed before converging
nb_iter = np.nan
    # Step A: QR decomposition of B, page 1132 KNV
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
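    # After the QR decomposition B = U*R with U = [u0, u1]: u0 spans the
    # range of B, u1 its orthogonal complement, and z keeps the first rankB
    # rows of R, so that B = u0 @ z when B has full column rank.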
# If the solution is unique
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
        # such that (A+BK)=diag(P), i.e. BK=diag(P)-A
        # if B has as many rows as its rank (but is not square) the solution
# is the same as above using least squares
# => use lstsq in both cases
# for complex poles we use the following trick
#
        # |a -b| has eigenvalues a+bi and a-bi
        # |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
        # e.g. solving the first one in R gives the solution
# for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
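        # e.g. for a conjugate pair -2+1j, -2-1j the loop above builds the
        # real block [[-2, -1], [1, -2]], whose eigenvalues are indeed
        # -2+1j and -2-1j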
gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = 0
nb_iter = 0
else:
        # step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix. However qr sometimes returns, for each pole, vectors with
            # zeros on the same row, which yields very long convergence times,
            # or a mix of vectors with and without a zero imaginary part.
            # After trying many ways to select the best possible vector (e.g.
            # discarding vectors with a zero imaginary part for complex poles),
            # summing all vectors in ker_pole_j turned out to solve every
            # observed case while remaining a valid choice for transfer_matrix.
            # Indeed, for complex poles the sum is guaranteed to have a
            # non-zero imaginary part, and rows full of zeros are avoided too:
            # when one vector from ker_pole_j has a zero entry, the other
            # one(s) (when ker_pole_j.shape[1] > 1) won't have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
    # i.e. columns j/j+1 currently hold Re(pole), Im(pole) and will hold
    # Re - 1j*Im / Re + 1j*Im afterwards
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
            # rel must be a copy: without copy() it would be a view on a
            # column of transfer_matrix that changes after the next line,
            # and the line after that would not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
    # K still contains complex values with ~0j imaginary parts, get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
|
bsd-3-clause
|
matthew-tucker/mne-python
|
mne/preprocessing/tests/test_infomax.py
|
7
|
3898
|
# Authors: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
"""
Test the infomax algorithm.
Parts of this code are taken from scikit-learn
"""
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from scipy import linalg
from mne.preprocessing.infomax_ import infomax
from mne.utils import requires_sklearn
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
    ----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
@requires_sklearn
def test_infomax_simple(add_noise=False):
""" Test the infomax algorithm on very simple data.
"""
from sklearn.decomposition import RandomizedPCA
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
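    # this mixing matrix is orthogonal with determinant -1 (a reflection),
    # so the observed signals are just a rotated/reflected view of the
    # two sources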
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
algos = [True, False]
for algo in algos:
X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
k_ = infomax(X, extended=algo)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
@requires_sklearn
def test_non_square_infomax(add_noise=False):
""" Test non-square infomax
"""
from sklearn.decomposition import RandomizedPCA
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
n_observed = 6
mixing = rng.randn(n_observed, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(n_observed, n_samples)
center_and_norm(m)
pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
m = m.T
m = pca.fit_transform(m)
# we need extended since input signals are sub-gaussian
unmixing_ = infomax(m, random_state=rng, extended=True)
s_ = np.dot(unmixing_, m.T)
# Check that the mixing model described in the docstring holds:
mixing_ = linalg.pinv(unmixing_.T)
assert_almost_equal(m, s_.T.dot(mixing_))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
|
bsd-3-clause
|
jamesjarlathlong/resourceful
|
exploit_sharedbattery.py
|
1
|
8457
|
import os
from exploit_agent import *
import asyncio
from qlearn import QLearn
from sarsa import Sarsa
import itertools
import functools
import json
import random
import sklearn
import collections
import websockets
import copy
import time
###Helper functions###
def merge(dicts):
super_dict = collections.defaultdict(list)
for d in dicts:
for k, v in d.items():
super_dict[k]+=v
return super_dict
def tuple_namer(name,tupl):
"""convert an unnamed state tuple
to a namedtuple object"""
tupl_templ = collections.namedtuple(name, 'battery status neighbour nstat')
named = tupl_templ(battery = tupl[0], status = tupl[1], neighbour = tupl[2], nstat = tupl[3])
return named
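# e.g. tuple_namer('Sensor1', (5, 'running', 3, 'sleeping')) returns
# Sensor1(battery=5, status='running', neighbour=3, nstat='sleeping')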
def dictionary_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):v for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
def tracker_saver(d, filename):
"""d is a dictionary whose keys are of the form (namedtuple, 'string')"""
json_friendly_d = {json.dumps(k):json.dumps(v) for k,v in d.items()}
sklearn.externals.joblib.dump(json_friendly_d, filename)
#======actions========#
def go_to_sleep(old):
new = old._replace(status = 'sleeping')
return new
def prep_sleep(old):
new = old._replace(status='pending')
return new
def wakeup(old):
new = old._replace(status = 'running')
return new
def noop(old):
print('noop')
return copy.deepcopy(old)
def create_action_states(states):
actions_states_sleeping = {i:[noop, wakeup] for i in states if i.status=='sleeping'}
actions_states_running = {i:[noop, prep_sleep] for i in states if i.status == 'running'}
actions_states_pending = {i:[go_to_sleep] for i in states if i.status == 'pending'}
return merge([actions_states_sleeping, actions_states_running, actions_states_pending])
#####rewards###########
def state_rewards(state1, state2):
initial_reward = 0
if state2.status =='running' or state2.nstat=='running':
initial_reward += 100
if state1.status != state2.status:
initial_reward -= 10
    if state2.battery == 0:
initial_reward = -100
return initial_reward
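# e.g. staying 'running' with a live battery earns +100, any status switch
# costs 10, and letting the battery hit 0 overrides everything with -100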
###message passing
def find_lead(qs,recruiter):
"""for recruiter, find potential helper"""
all_candidates = [k for k in qs if k!=recruiter]
return all_candidates[0]
def broadcast_change(old_state, new_state):
"""gets called when a sensor changes
from sleeping to awake, notifies the other
sensors of this change"""
def neighbor_changed(old_other, new_other,old_self):
new_self = old_self._replace(neighbour=new_other.battery, nstat= new_other.status)
return new_self
update_from = type(new_state).__name__
update_to = find_lead(qs, update_from)
print('updating from: ', update_from, ' to: ', update_to)
neighbor_change_func = functools.partial(neighbor_changed,old_state, new_state)
qs[update_to].update((1,neighbor_change_func))
"""environments"""
#=====autonomous actions=======#
def battery_action():
sunny = True
def adjust_battery(is_sunny, sensor):
old_sensor = copy.deepcopy(sensor)
if sensor.status =='sleeping':
            new_battery = sensor.battery + is_sunny*1  # charge by 1 when sunny
sensor = sensor._replace(battery=new_battery)
elif sensor.status=='running':
new_battery = sensor.battery - is_sunny*1
sensor = sensor._replace(battery=new_battery)
if sensor.battery<=0:
sensor = sensor._replace(battery=0, status = 'pending')
if sensor.battery>=5:
sensor = sensor._replace(battery=5)
if old_sensor.battery!=sensor.battery:
broadcast_change(old_sensor, sensor)
return sensor
while True:
#if random.random()<0.1:
# sunny = not sunny
adjust_battery_sunny = functools.partial(adjust_battery, sunny)
yield adjust_battery_sunny
#======reactions to agent actions==========#
def reaction_default(state1,state2, action):
if (state1.battery!=state2.battery) or (state1.status!=state2.status):
print('broadcasting change')
broadcast_change(state1, state2)
return state2
def writer(self, state):
t = self.loop.time()-self.loop.t0
name_id_map = {'Sensor1':0, 'Sensor2':1}
idee = name_id_map[type(state).__name__]
update = (t,{'_id':idee, 'battery':state.battery, 'status':state.status, 'neighbour': state.neighbour, 'nstat':state.nstat})
print('update: ', update)
writerq.append(update)
"""special update function to ensure only latest event
with info about neighbour is kept on the queue"""
def update(self, new_item):
priority_level = new_item[0]
def matching_level(element, priority_level):
return element[0]==priority_level
try:
match_generator = (index for index,element in enumerate(self._queue)
if matching_level(element, priority_level))
matching_index = next(match_generator)
self._queue[matching_index] = new_item
except StopIteration:
self.put_nowait(new_item)
asyncio.PriorityQueue.update = update
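# monkey-patch asyncio.PriorityQueue so that update() replaces an already
# queued item with the same priority level instead of enqueueing a duplicate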
def loader(dictionary):
def load_keys(k):
unjsoned = json.loads(k)
loaded = tuple( ((tuple(unjsoned[0])), unjsoned[1] ))
return loaded
return {load_keys(k):v for k,v in dictionary.items()}
def q_loader(filename, agentname):
learned = sklearn.externals.joblib.load(filename)
loaded = loader(learned)
named = {(tuple_namer(agentname, k[0]), k[1]):v for k,v in loaded.items()}
return named
if __name__ == '__main__':
loop = asyncio.get_event_loop()
"""prelearned q"""
learned_1 = q_loader('agent1_batt', 'Sensor1')
learned_2 = q_loader('agent2_batt', 'Sensor2')
t0 = loop.time()
loop.t0 = t0
"""States"""
battery = range(6)
status = ['sleeping', 'running', 'pending']
neighbour = range(6)
nstat = ['sleeping', 'running', 'pending']
all_vars = [battery,status, neighbour, nstat]
state_combinations = list(itertools.product(*all_vars))
"""websocket comm"""
Agent.writer = writer
"""agent 1"""
states1 = [tuple_namer('Sensor1', i) for i in state_combinations]
initial_state1 = tuple_namer('Sensor1', (5,'running', 5, 'running'))
actions_states1 = create_action_states(states1)
agent1 = Agent(actions_states1, state_rewards, initial_state1, wakeup, Sarsa, 1011, loop, learned_1)
"""agent 2"""
states2 = [tuple_namer('Sensor2', i) for i in state_combinations]
initial_state2 = tuple_namer('Sensor2', (5,'running', 5, 'running'))
actions_states2 = create_action_states(states2)
agent2 = Agent(actions_states2, state_rewards, initial_state2, wakeup, Sarsa, 1022, loop, learned_2)
"""message passing between agents"""
qs = {'Sensor1':agent1.sensing_q, 'Sensor2':agent2.sensing_q}
"""message passing to websocket"""
writerq = []#collections.deque([], maxlen=1000)#asyncio.PriorityQueue(maxsize = 2048)
"""now define our environments"""
env_reactions = {'go_to_sleep':reaction_default, 'wakeup':reaction_default,
'noop':reaction_default, 'prep_sleep': reaction_default}
env1 = Environment(env_reactions,[battery_action()], agent1.sensing_q, agent1.action_q)
env2 = Environment(env_reactions,[battery_action()], agent2.sensing_q, agent2.action_q)
"""now run the simulation"""
tasks = [agent1.experience_environment(), env1.react_to_action(),
agent2.experience_environment(), env2.react_to_action()]
#for i in env1.env_actions:
# tasks.append(i(agent1.sensing_q))
#for j in env2.env_actions:
# tasks.append(j(agent2.sensing_q))
def replace(i):
i[1]['_id'] += 0
return i
def loop_stopper():
print('loop stopper')
loop.stop()
print('saving')
sklearn.externals.joblib.dump([replace(i) for i in writerq], 'batt_exwriter1')
dictionary_saver(agent1.learner.q, 'agent1_battex')
tracker_saver(agent1.learner.updatecount, 'agent1_batthistex')
dictionary_saver(agent2.learner.q, 'agent2_battex')
tracker_saver(agent2.learner.updatecount, 'agent2_batthistex')
print('saved')
loop.call_later(100, loop_stopper)
loop.run_until_complete(asyncio.wait(tasks))
|
mit
|
beepee14/scikit-learn
|
examples/plot_isotonic_regression.py
|
303
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
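# y_ holds the fitted non-decreasing values at the training points
# (fit_transform here is fit followed by transform on x)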
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backend_tools.py
|
10
|
28346
|
"""
Abstract base classes define the primitives for Tools.
These tools are used by `matplotlib.backend_managers.ToolManager`
:class:`ToolBase`
Simple stateless tool
:class:`ToolToggleBase`
Tool that has two states, only one Toggle tool can be
active at any given time for the same
`matplotlib.backend_managers.ToolManager`
"""
from matplotlib import rcParams
from matplotlib._pylab_helpers import Gcf
import matplotlib.cbook as cbook
from weakref import WeakKeyDictionary
import numpy as np
import six
import warnings
class Cursors(object):
"""Simple namespace for cursor reference"""
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
# Views positions tool
_views_positions = 'viewpos'
class ToolBase(object):
"""
Base tool class
    A base tool; it only implements the `trigger` method, or no method at all.
The tool is instantiated by `matplotlib.backend_managers.ToolManager`
Attributes
----------
toolmanager: `matplotlib.backend_managers.ToolManager`
ToolManager that controls this Tool
figure: `FigureCanvas`
Figure instance that is affected by this Tool
name: String
Used as **Id** of the tool, has to be unique among tools of the same
ToolManager
"""
default_keymap = None
"""
Keymap to associate with this tool
    **String**: List of comma-separated keys that will be used to call this
    tool when the keypress event of *self.figure.canvas* is emitted
"""
description = None
"""
Description of the Tool
**String**: If the Tool is included in the Toolbar this text is used
as a Tooltip
"""
image = None
"""
Filename of the image
**String**: Filename of the image to use in the toolbar. If None, the
`name` is used as a label in the toolbar button
"""
def __init__(self, toolmanager, name):
warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
'experimental for now, the API will likely change in ' +
'version 2.1, and some tools might change name')
self._name = name
self._figure = None
self.toolmanager = toolmanager
self.figure = toolmanager.canvas.figure
@property
def figure(self):
return self._figure
def trigger(self, sender, event, data=None):
"""
Called when this tool gets used
This method is called by
`matplotlib.backend_managers.ToolManager.trigger_tool`
Parameters
----------
event: `Event`
The Canvas event that caused this tool to be called
sender: object
Object that requested the tool to be triggered
data: object
Extra data
"""
pass
@figure.setter
def figure(self, figure):
"""
Set the figure
Set the figure to be affected by this tool
Parameters
----------
figure: `Figure`
"""
self._figure = figure
@property
def name(self):
"""Tool Id"""
return self._name
def destroy(self):
"""
Destroy the tool
This method is called when the tool is removed by
`matplotlib.backend_managers.ToolManager.remove_tool`
"""
pass
class ToolToggleBase(ToolBase):
"""
Toggleable tool
Every time it is triggered, it switches between enable and disable
"""
radio_group = None
"""Attribute to group 'radio' like tools (mutually exclusive)
**String** that identifies the group or **None** if not belonging to a
group
"""
cursor = None
"""Cursor to use when the tool is active"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._toggled = False
def trigger(self, sender, event, data=None):
"""Calls `enable` or `disable` based on `toggled` value"""
if self._toggled:
self.disable(event)
else:
self.enable(event)
self._toggled = not self._toggled
def enable(self, event=None):
"""
Enable the toggle tool
`trigger` calls this method when `toggled` is False
"""
pass
def disable(self, event=None):
"""
Disable the toggle tool
        `trigger` calls this method when `toggled` is True.
This can happen in different circumstances
* Click on the toolbar tool button
* Call to `matplotlib.backend_managers.ToolManager.trigger_tool`
* Another `ToolToggleBase` derived tool is triggered
(from the same `ToolManager`)
"""
pass
@property
def toggled(self):
"""State of the toggled tool"""
return self._toggled
class SetCursorBase(ToolBase):
"""
Change to the current cursor while inaxes
    This tool keeps track of all `ToolToggleBase` derived tools and calls
set_cursor when a tool gets triggered
"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self._set_cursor_cbk)
self._cursor = None
self._default_cursor = cursors.POINTER
self._last_cursor = self._default_cursor
self.toolmanager.toolmanager_connect('tool_added_event',
self._add_tool_cbk)
# process current tools
for tool in self.toolmanager.tools.values():
self._add_tool(tool)
def _tool_trigger_cbk(self, event):
if event.tool.toggled:
self._cursor = event.tool.cursor
else:
self._cursor = None
self._set_cursor_cbk(event.canvasevent)
def _add_tool(self, tool):
"""set the cursor when the tool is triggered"""
if getattr(tool, 'cursor', None) is not None:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_trigger_cbk)
def _add_tool_cbk(self, event):
"""Process every newly added tool"""
if event.tool is self:
return
self._add_tool(event.tool)
def _set_cursor_cbk(self, event):
if not event:
return
if not getattr(event, 'inaxes', False) or not self._cursor:
if self._last_cursor != self._default_cursor:
self.set_cursor(self._default_cursor)
self._last_cursor = self._default_cursor
elif self._cursor:
cursor = self._cursor
if cursor and self._last_cursor != cursor:
self.set_cursor(cursor)
self._last_cursor = cursor
def set_cursor(self, cursor):
"""
Set the cursor
This method has to be implemented per backend
"""
raise NotImplementedError
class ToolCursorPosition(ToolBase):
"""
Send message with the current pointer position
This tool runs in the background reporting the position of the cursor
"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self.send_message)
def send_message(self, event):
"""Call `matplotlib.backend_managers.ToolManager.message_event`"""
if self.toolmanager.messagelock.locked():
return
message = ' '
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
artists = [a for a in event.inaxes.mouseover_set
if a.contains(event) and a.get_visible()]
if artists:
a = max(artists, key=lambda x: x.zorder)
if a is not event.inaxes.patch:
data = a.get_cursor_data(event)
if data is not None:
s += ' [%s]' % a.format_cursor_data(data)
message = s
self.toolmanager.message_event(message, self)
class RubberbandBase(ToolBase):
"""Draw and remove rubberband"""
def trigger(self, sender, event, data):
"""Call `draw_rubberband` or `remove_rubberband` based on data"""
if not self.figure.canvas.widgetlock.available(sender):
return
if data is not None:
self.draw_rubberband(*data)
else:
self.remove_rubberband()
def draw_rubberband(self, *data):
"""
Draw rubberband
This method must get implemented per backend
"""
raise NotImplementedError
def remove_rubberband(self):
"""
Remove rubberband
This method should get implemented per backend
"""
pass
class ToolQuit(ToolBase):
"""Tool to call the figure manager destroy method"""
description = 'Quit the figure'
default_keymap = rcParams['keymap.quit']
def trigger(self, sender, event, data=None):
Gcf.destroy_fig(self.figure)
class ToolEnableAllNavigation(ToolBase):
"""Tool to enable all axes for toolmanager interaction"""
description = 'Enables all axes toolmanager'
default_keymap = rcParams['keymap.all_axes']
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
for a in self.figure.get_axes():
if (event.x is not None and event.y is not None
and a.in_axes(event)):
a.set_navigate(True)
class ToolEnableNavigation(ToolBase):
"""Tool to enable a specific axes for toolmanager interaction"""
description = 'Enables one axes toolmanager'
default_keymap = (1, 2, 3, 4, 5, 6, 7, 8, 9)
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
n = int(event.key) - 1
for i, a in enumerate(self.figure.get_axes()):
if (event.x is not None and event.y is not None
and a.in_axes(event)):
a.set_navigate(i == n)
class ToolGrid(ToolToggleBase):
"""Tool to toggle the grid of the figure"""
    description = 'Toggle Grid'
default_keymap = rcParams['keymap.grid']
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
ToolToggleBase.trigger(self, sender, event, data)
def enable(self, event):
event.inaxes.grid(True)
self.figure.canvas.draw_idle()
def disable(self, event):
event.inaxes.grid(False)
self.figure.canvas.draw_idle()
class ToolFullScreen(ToolToggleBase):
"""Tool to toggle full screen"""
    description = 'Toggle Fullscreen mode'
default_keymap = rcParams['keymap.fullscreen']
def enable(self, event):
self.figure.canvas.manager.full_screen_toggle()
def disable(self, event):
self.figure.canvas.manager.full_screen_toggle()
class AxisScaleBase(ToolToggleBase):
"""Base Tool to toggle between linear and logarithmic"""
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
ToolToggleBase.trigger(self, sender, event, data)
def enable(self, event):
self.set_scale(event.inaxes, 'log')
self.figure.canvas.draw_idle()
def disable(self, event):
self.set_scale(event.inaxes, 'linear')
self.figure.canvas.draw_idle()
class ToolYScale(AxisScaleBase):
"""Tool to toggle between linear and logarithmic scales on the Y axis"""
    description = 'Toggle Scale Y axis'
default_keymap = rcParams['keymap.yscale']
def set_scale(self, ax, scale):
ax.set_yscale(scale)
class ToolXScale(AxisScaleBase):
"""Tool to toggle between linear and logarithmic scales on the X axis"""
    description = 'Toggle Scale X axis'
default_keymap = rcParams['keymap.xscale']
def set_scale(self, ax, scale):
ax.set_xscale(scale)
class ToolViewsPositions(ToolBase):
"""
Auxiliary Tool to handle changes in views and positions
Runs in the background and should get used by all the tools that
need to access the figure's history of views and positions, e.g.
* `ToolZoom`
* `ToolPan`
* `ToolHome`
* `ToolBack`
* `ToolForward`
"""
def __init__(self, *args, **kwargs):
self.views = WeakKeyDictionary()
self.positions = WeakKeyDictionary()
ToolBase.__init__(self, *args, **kwargs)
def add_figure(self):
"""Add the current figure to the stack of views and positions"""
if self.figure not in self.views:
self.views[self.figure] = cbook.Stack()
self.positions[self.figure] = cbook.Stack()
# Define Home
self.push_current()
# Adding the clear method as axobserver, removes this burden from
# the backend
self.figure.add_axobserver(self.clear)
def clear(self, figure):
"""Reset the axes stack"""
if figure in self.views:
self.views[figure].clear()
self.positions[figure].clear()
def update_view(self):
"""
Update the viewlim and position from the view and
position stack for each axes
"""
views = self.views[self.figure]()
if views is None:
return
pos = self.positions[self.figure]()
if pos is None:
return
for i, a in enumerate(self.figure.get_axes()):
a._set_view(views[i])
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.figure.canvas.draw_idle()
def push_current(self):
"""push the current view limits and position onto the stack"""
views = []
pos = []
for a in self.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self.views[self.figure].push(views)
self.positions[self.figure].push(pos)
def refresh_locators(self):
"""Redraw the canvases, update the locators"""
for a in self.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
zaxis = getattr(a, 'zaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
if zaxis is not None:
locators.append(zaxis.get_major_locator())
locators.append(zaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.figure.canvas.draw_idle()
def home(self):
"""Recall the first view and position from the stack"""
self.views[self.figure].home()
self.positions[self.figure].home()
def back(self):
"""Back one step in the stack of views and positions"""
self.views[self.figure].back()
self.positions[self.figure].back()
def forward(self):
"""Forward one step in the stack of views and positions"""
self.views[self.figure].forward()
self.positions[self.figure].forward()
class ViewsPositionsBase(ToolBase):
"""Base class for `ToolHome`, `ToolBack` and `ToolForward`"""
_on_trigger = None
def trigger(self, sender, event, data=None):
self.toolmanager.get_tool(_views_positions).add_figure()
getattr(self.toolmanager.get_tool(_views_positions),
self._on_trigger)()
self.toolmanager.get_tool(_views_positions).update_view()
class ToolHome(ViewsPositionsBase):
"""Restore the original view lim"""
description = 'Reset original view'
image = 'home.png'
default_keymap = rcParams['keymap.home']
_on_trigger = 'home'
class ToolBack(ViewsPositionsBase):
"""Move back up the view lim stack"""
description = 'Back to previous view'
image = 'back.png'
default_keymap = rcParams['keymap.back']
_on_trigger = 'back'
class ToolForward(ViewsPositionsBase):
"""Move forward in the view lim stack"""
description = 'Forward to next view'
image = 'forward.png'
default_keymap = rcParams['keymap.forward']
_on_trigger = 'forward'
class ConfigureSubplotsBase(ToolBase):
"""Base tool for the configuration of subplots"""
description = 'Configure subplots'
image = 'subplots.png'
class SaveFigureBase(ToolBase):
"""Base tool for figure saving"""
description = 'Save the figure'
image = 'filesave.png'
default_keymap = rcParams['keymap.save']
class ZoomPanBase(ToolToggleBase):
"""Base class for `ToolZoom` and `ToolPan`"""
def __init__(self, *args):
ToolToggleBase.__init__(self, *args)
self._button_pressed = None
self._xypress = None
self._idPress = None
self._idRelease = None
self._idScroll = None
self.base_scale = 2.
def enable(self, event):
"""Connect press/release events and lock the canvas"""
self.figure.canvas.widgetlock(self)
self._idPress = self.figure.canvas.mpl_connect(
'button_press_event', self._press)
self._idRelease = self.figure.canvas.mpl_connect(
'button_release_event', self._release)
self._idScroll = self.figure.canvas.mpl_connect(
'scroll_event', self.scroll_zoom)
def disable(self, event):
"""Release the canvas and disconnect press/release events"""
self._cancel_action()
self.figure.canvas.widgetlock.release(self)
self.figure.canvas.mpl_disconnect(self._idPress)
self.figure.canvas.mpl_disconnect(self._idRelease)
self.figure.canvas.mpl_disconnect(self._idScroll)
def trigger(self, sender, event, data=None):
self.toolmanager.get_tool(_views_positions).add_figure()
ToolToggleBase.trigger(self, sender, event, data)
def scroll_zoom(self, event):
# https://gist.github.com/tacaswell/3144287
if event.inaxes is None:
return
ax = event.inaxes
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
# set the range
cur_xrange = (cur_xlim[1] - cur_xlim[0])*.5
cur_yrange = (cur_ylim[1] - cur_ylim[0])*.5
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'up':
# deal with zoom in
scale_factor = 1 / self.base_scale
elif event.button == 'down':
# deal with zoom out
scale_factor = self.base_scale
else:
# deal with something that should never happen
scale_factor = 1
# set new limits
ax.set_xlim([xdata - cur_xrange*scale_factor,
xdata + cur_xrange*scale_factor])
ax.set_ylim([ydata - cur_yrange*scale_factor,
ydata + cur_yrange*scale_factor])
self.figure.canvas.draw_idle() # force re-draw
class ToolZoom(ZoomPanBase):
"""Zoom to rectangle"""
description = 'Zoom to rectangle'
image = 'zoom_to_rect.png'
default_keymap = rcParams['keymap.zoom']
cursor = cursors.SELECT_REGION
radio_group = 'default'
def __init__(self, *args):
ZoomPanBase.__init__(self, *args)
self._ids_zoom = []
def _cancel_action(self):
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self.toolmanager.trigger_tool('rubberband', self)
self.toolmanager.get_tool(_views_positions).refresh_locators()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
def _press(self, event):
"""the _press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
self._cancel_action()
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
id2 = self.figure.canvas.mpl_connect(
'key_press_event', self._switch_on_zoom_mode)
id3 = self.figure.canvas.mpl_connect(
'key_release_event', self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self._mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self._mouse_move(event)
def _mouse_move(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, _ind, _view = self._xypress[0]
            # clip x, lastx, y, lasty to the axes bounding box
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.toolmanager.trigger_tool('rubberband',
self,
data=(x, y, lastx, lasty))
def _release(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
self._cancel_action()
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, _ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._cancel_action()
return
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self._zoom_mode = None
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
class ToolPan(ZoomPanBase):
"""Pan axes with left mouse, zoom with right"""
default_keymap = rcParams['keymap.pan']
description = 'Pan axes with left mouse, zoom with right'
image = 'move.png'
cursor = cursors.MOVE
radio_group = 'default'
def __init__(self, *args):
ZoomPanBase.__init__(self, *args)
self._idDrag = None
def _cancel_action(self):
self._button_pressed = None
self._xypress = []
self.figure.canvas.mpl_disconnect(self._idDrag)
self.toolmanager.messagelock.release(self)
self.toolmanager.get_tool(_views_positions).refresh_locators()
def _press(self, event):
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.toolmanager.messagelock(self)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
def _release(self, event):
if self._button_pressed is None:
self._cancel_action()
return
self.figure.canvas.mpl_disconnect(self._idDrag)
self.toolmanager.messagelock.release(self)
for a, _ind in self._xypress:
a.end_pan()
if not self._xypress:
self._cancel_action()
return
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
def _mouse_move(self, event):
for a, _ind in self._xypress:
            # safer to use the button recorded at _press time than the
            # current button: multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.toolmanager.canvas.draw_idle()
default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
'zoom': ToolZoom, 'pan': ToolPan,
'subplots': 'ToolConfigureSubplots',
'save': 'ToolSaveFigure',
'grid': ToolGrid,
'fullscreen': ToolFullScreen,
'quit': ToolQuit,
'allnav': ToolEnableAllNavigation,
'nav': ToolEnableNavigation,
'xscale': ToolXScale,
'yscale': ToolYScale,
'position': ToolCursorPosition,
_views_positions: ToolViewsPositions,
'cursor': 'ToolSetCursor',
'rubberband': 'ToolRubberband',
}
"""Default tools"""
default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
['zoompan', ['pan', 'zoom', 'subplots']],
['io', ['save']]]
"""Default tools in the toolbar"""
def add_tools_to_manager(toolmanager, tools=default_tools):
"""
Add multiple tools to `ToolManager`
Parameters
----------
toolmanager: ToolManager
`backend_managers.ToolManager` object that will get the tools added
tools : {str: class_like}, optional
The tools to add in a {name: tool} dict, see `add_tool` for more
info.
"""
for name, tool in six.iteritems(tools):
toolmanager.add_tool(name, tool)
def add_tools_to_container(container, tools=default_toolbar_tools):
"""
Add multiple tools to the container.
Parameters
----------
container: Container
`backend_bases.ToolContainerBase` object that will get the tools added
tools : list, optional
List in the form
[[group1, [tool1, tool2 ...]], [group2, [...]]]
Where the tools given by tool1, and tool2 will display in group1.
See `add_tool` for details.
"""
for group, grouptools in tools:
for position, tool in enumerate(grouptools):
container.add_tool(tool, group, position)
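# Rough usage sketch (an assumption, not part of this module): a
# ToolManager-based backend, i.e. one created with
# rcParams['toolbar'] = 'toolmanager', is expected to call the two helpers
# above roughly as
#
#     add_tools_to_manager(manager.toolmanager)
#     add_tools_to_container(manager.toolbar)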
|
gpl-3.0
|
robin-lai/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
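# all remaining samples keep the label -1, i.e. they are unlabeled; only the
# first (outer circle) and last (inner circle) points carry a label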
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
ningchi/scikit-learn
|
examples/classification/plot_digits_classification.py
|
289
|
2397
|
"""
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
|
bsd-3-clause
|
franciscogarate/pyliferisk
|
Examples/Example_4_5b.py
|
1
|
1122
|
#!/usr/bin/python
from pyliferisk import *
from pyliferisk.mortalitytables import INM05
import numpy as np
import pandas as pd
rfr = pd.read_excel('EIOPA_RFR_20161231_Term_Structures.xlsx', sheet_name='RFR_spot_no_VA',
skiprows=9, usecols='C:C', names=['Euro'])
tariff = Actuarial(nt=INM05, i=0.05)
reserve = MortalityTable(nt=INM05)
x = 32 # age
Cd = 3000 # capital death
Premium = Cd * Ax(tariff, 25) / annuity(tariff, 25, 'w', 0) #fixed at age 25
qx_vector = []
px_vector = []
for i in range(x,reserve.w + 1):
qx = ((reserve.lx[i]-reserve.lx[i+1]) / reserve.lx[x])
qx_vector.append(qx)
qx_sum = sum(qx_vector)
px_vector.append(1 - qx_sum)
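# each appended qx is the probability, seen from age x, of dying during that
# future year; each px is the probability of still being alive at the end of
# that year (1 minus the accumulated qx)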
def Reserve(i):
discount_factor = []
for y in range(0, reserve.w - x + 1):
if isinstance(i,float):
discount_factor.append(1 / (1 + i) ** (y + 1))
elif i == 'rfr':
discount_factor.append(1 / (1 + rfr['Euro'][y]) ** (y + 1))
APV_Premium = np.dot(Premium, px_vector)
APV_Claims = np.dot(Cd, qx_vector)
return np.dot(discount_factor, np.subtract(APV_Claims, APV_Premium)).round(2)
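# Reserve(i) is the prospective reserve: the present value of expected death
# benefits minus the present value of expected premiums, discounted at a flat
# rate i or, when i == 'rfr', along the EIOPA risk-free spot curve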
print(Reserve(0.0191))
print(Reserve(0.0139))
print(Reserve('rfr'))
|
gpl-3.0
|
arbuz001/sms-tools
|
lectures/05-Sinusoidal-model/plots-code/spectral-peaks.py
|
22
|
1159
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
pmag = mX[ploc]
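# ploc holds the bin indices of the local maxima of mX above the threshold t
# (in dB); pmag are the corresponding peak magnitudes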
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX, 'r', lw=1.5)
plt.axis([300,2500,-70,max(mX)])
plt.plot(fs * ploc / N, pmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([300,2500,6,14])
plt.plot(fs * ploc / N, pX[ploc], marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
|
agpl-3.0
|
yyjiang/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
|
bsd-3-clause
|
kaichogami/scikit-learn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
338
|
4324
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
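# --- Illustrative sketch (not part of the original example) -----------------
# A minimal numerical illustration of why ``n_init`` matters: with a single
# random init the inertia of KMeans varies noticeably from run to run, while
# n_init=10 keeps the best of ten inits and is much more stable. It reuses
# make_data and n_clusters from above; X_demo and n_init_demo are new names.
X_demo, _ = make_data(0, n_samples_per_center, grid_size, scale)
for n_init_demo in (1, 10):
    inertias = [KMeans(n_clusters=n_clusters, init='random', n_init=n_init_demo,
                       random_state=seed).fit(X_demo).inertia_
                for seed in range(5)]
    print("n_init=%d: relative inertia std = %.4f"
          % (n_init_demo, np.std(inertias) / np.mean(inertias)))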
|
bsd-3-clause
|
helloTC/LearnPython
|
tensorflow_learning/linear_reg.py
|
1
|
1901
|
#!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50
# Training Data
train_X = np.array([3.3, 4.4, 5.5, 6.7, 7.0, 4.2, 9.8, 6.2, 7.6, 2.2, 7.0, 10.8, 5.3, 8.0, 5.7, 9.3, 3.1])
train_Y = np.array([1.7, 2.8, 2.1, 3.2, 1.7, 1.6, 3.4, 2.6, 2.5, 1.2, 2.8, 3.4, 1.6, 2.9, 2.4, 2.9, 1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder('float')
Y = tf.placeholder('float')
# Create Model
# Set model weights
W = tf.Variable(np.random.randn(), name = 'weights')
b = tf.Variable(np.random.randn(), name = 'b')
# Construct a linear Model
activation = tf.add(tf.multiply(X, W), b)
# Minimize the squared errors
cost = tf.reduce_sum(tf.pow(activation - Y, 2))/(2*n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the Graph
with tf.Session() as sess:
sess.run(init)
# Fit all training Data
    for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
# Display logs per epoch step
if epoch % display_step == 0:
print('Epoch: {}'.format(epoch+1))
print('Cost={:.9f}'.format(sess.run(cost, feed_dict={X:train_X, Y:train_Y})))
print('W={}'.format(sess.run(W)))
print('b={}'.format(sess.run(b)))
print('Optimization finished!')
    print('cost = {}'.format(sess.run(cost, feed_dict={X: train_X, Y: train_Y})))
print('W = {}'.format(sess.run(W)))
print('b = {}'.format(sess.run(b)))
# Graphic display
plt.plot(train_X, train_Y, 'ro', label = 'Original data')
plt.plot(train_X, sess.run(W)*train_X + sess.run(b), label = 'Fitted line')
plt.legend()
plt.show()
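# --- Illustrative sanity check (not part of the original script) -------------
# Gradient descent above should approach the ordinary least-squares solution;
# numpy's polyfit gives that closed-form fit directly, so comparing the two is
# a cheap convergence check. W_ols and b_ols are new names for illustration.
W_ols, b_ols = np.polyfit(train_X, train_Y, 1)
print('closed-form OLS: W = {:.4f}, b = {:.4f}'.format(W_ols, b_ols))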
|
mit
|
ahnitz/mpld3
|
mpld3/mplexporter/renderers/base.py
|
44
|
14355
|
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
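# --- Illustrative sketch (not part of the original module) -------------------
# Minimal example of the pattern this base class expects: a subclass only has
# to implement draw_path (plus draw_text/draw_image where needed), and the
# inherited helpers such as draw_line and draw_markers translate higher-level
# calls into draw_path. The class below is hypothetical and simply prints
# what it receives.
class _PrintingRenderer(Renderer):
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        print("path: %d vertices, codes=%s, coords=%s"
              % (len(data), "".join(pathcodes), coordinates))
# Example call (hypothetical); draw_line expects 'color' and 'linewidth' keys
# in its style dictionary:
#     _PrintingRenderer().draw_line(np.array([[0., 0.], [1., 1.]]), 'data',
#                                   {'color': '#000000', 'linewidth': 1.0},
#                                   label=None)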
|
bsd-3-clause
|
weaver-viii/h2o-3
|
h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py
|
3
|
1090
|
import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def get_modelKmeans(ip,port):
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing benign.csv data...\n")
benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
#benign_h2o.summary()
benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
benign_sci = imp.fit_transform(benign_sci)
for i in range(2,7):
# Log.info("H2O K-Means")
km_h2o = h2o.kmeans(x=benign_h2o, k=i)
km_h2o.show()
model = h2o.get_model(km_h2o._id)
model.show()
km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
km_sci.fit(benign_sci)
        print "scikit centers"
print km_sci.cluster_centers_
if __name__ == "__main__":
h2o.run_test(sys.argv, get_modelKmeans)
|
apache-2.0
|
tbarchyn/flow_ninja
|
flow_ninja.py
|
1
|
16444
|
# FLOW NINJA
# Copyright 2016-2017 Thomas E. Barchyn
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# flow ninja: the purpose of this is to manage collections of vector estimates and perform
# assimilations of local flow conditions.
# NOTE: this does not handle 3D flows presently; it only handles 2D flows (az and mag),
# but the flows can have 3D context (e.g., a z coordinate). When a 3D flow sensor
# becomes available, this will be updated to work in 3D.
import os
import sys
import copy
from math import *
import numpy as np
import pandas as pd
from threading import Lock
from weight_engine import *
from particle_ninja import *
# import number ninja (you need to specify this yourself!)
if os.name == 'posix':
try:
sys.path.append (os.path.abspath ('../packages/'))
except:
try:
sys.path.append (os.path.abspath ('./packages/'))
except:
pass
else:
sys.path.append ('C://Users//tom//Dropbox//python')
from number_ninja import *
class flow_ninja:
def __init__ (self, weight_engine_in = None, model = 'invdist', space_wt = 0.75, space_zerodist = 100.0,
time_zerodist = 3000.0, space_exponent = 0.2, time_exponent = 0.2, is_wind = False,
threadsafe = True):
'''
constructor creates the number ninjas to handle az and mag data
weight_engine_in = pre-defined weight engine, if unsupplied, takes the weight engine
parameters defined in other arguments
model = the model to use for flow weighting
space_wt = space weight for weight engine, the time weight is 1 - space_wt
        space_zerodist = the distance at which the space influence reaches zero
        time_zerodist = the distance at which the time influence reaches zero
space_exponent = the space weighting exponent
time_exponent = the time weighting exponent
is_wind = config to denote this is a wind measurement (and thus the direction of flow
is input as the direction the flow is coming from, not the direction that it
is going!). This will be stored internally as the direction it is going (as per
the semantic concept of flow).
threadsafe = if this is false, the threadlock is replaced with a fake lock, which allows easier
pickling (but obviously is not threadsafe).
'''
self.is_wind = is_wind
self.az = number_ninja (n = None, is_angle = True, log_time = True, always_return = True,
threadsafe = threadsafe)
self.mag = number_ninja (n = None, is_angle = False, log_time = True, always_return = True,
threadsafe = threadsafe)
# either manually construct weight engine, or use pre-defined one
if not weight_engine_in is None:
self.weight_engine = weight_engine_in
else:
self.weight_engine = weight_engine (model, space_wt, space_zerodist, time_zerodist,
space_exponent, time_exponent)
return
def add_data_np (self, azs, mags, xs = None, ys = None, zs = None, ts = None):
'''
method to add numpy arrays of vals, xs, ys, zs, ts. Note that this uses the
add_data method incrementally to guard against issues with the data and to
honor the pre-existing setup of the number ninja. This is by no means efficient,
and direct assignment of the arrays is another option.
azs = the azimuth values (not optional!)
mags = the magnitude values (not optional!)
xs = x positions (optional)
ys = y positions (optional)
zs = z positions (optional)
ts = time positions as a datetime object (optional, defaults to now)
'''
# set defaults
x = None
y = None
z = None
t = None
        # coerce to numpy arrays if these are pandas Series (a pandas Series indexes from 'index',
        # which is not safe to trust, as the dataframe could be sliced or the index could be arbitrary)
if type (azs) is pd.core.series.Series:
azs = np.array (azs)
if type (mags) is pd.core.series.Series:
mags = np.array (mags)
if type (xs) is pd.core.series.Series:
xs = np.array (xs)
if type (ys) is pd.core.series.Series:
ys = np.array (ys)
if type (zs) is pd.core.series.Series:
zs = np.array (zs)
if type (ts) is pd.core.series.Series:
ts = np.array (ts)
# incrementally loop over the data and append to the flow ninja
for i in range (0, azs.shape[0]):
if not np.isnan (azs[i]) and not np.isnan (mags[i]):
if not xs is None:
x = xs[i]
if not ys is None:
y = ys[i]
if not zs is None:
z = zs[i]
if not ts is None:
t = ts[i]
self.add_data (az = azs[i], mag = mags[i], x = x, y = y, z = z, t = t)
return
def add_data (self, az, mag, x = None, y = None, z = None, t = None):
'''
the method adds values to the list, with a noted x, y, z, time position
az = the azimuth of flow (in the case of wind, the direction it is coming from) (deg)
mag = the magnitude of flow
x = x position (optional)
y = y position (optional)
z = z position (optional)
t = time position as a datetime object (optional, defaults to now)
'''
# flip az if this is wind
if self.is_wind:
az = (180.0 + az) % 360.0
# add data to underlying number ninjas
self.az.add_data (az, x = x, y = y, z = z, t = t)
self.mag.add_data (mag, x = x, y = y, z = z, t = t)
return
def sample (self, test_x, test_y, test_t, test_z = None, return_index = False):
'''
method to perform weighted sample of flow conditions at a given location in
space and time. This can be used for stochastic simulations of lagrangian
style advection (among other things).
test_x = x location to sample from
test_y = y location to sample from
test_z = z location to sample from (optional)
test_t = time location to sample from
return_index = also return the sampled index
returns az and mag (and index if specified)
'''
if len (self.az.vals) > 0:
# get the weighted sample of the az, then use index to get mag
az, index = self.az.spacetime_sample (test_x = test_x, test_y = test_y, test_t = test_t,
weight_engine = self.weight_engine, test_z = test_z)
mag = float (self.mag.vals [index])
if return_index:
return (az, mag, index)
else:
return (az, mag)
else:
if return_index:
return (np.nan, np.nan, np.nan)
else:
return (np.nan, np.nan)
def calc_mean (self, test_x, test_y, test_t, test_z = None, scalar_mean = True):
'''
method to perform weighted mean in space for a given location
test_x = x location to sample from
test_y = y location to sample from
test_z = z location to sample from (optional)
test_t = time location to sample from
scalar_mean = calculate the scalar mean, if false, return the vector mean
returns az and mag
'''
# if we are returning the scalar mean, calculate the means separately
if scalar_mean:
az, influence = self.az.spacetime_mean (test_x = test_x, test_y = test_y, test_t = test_t,
weight_engine = self.weight_engine, test_z = test_z, circle_ranging = True)
mag, influence = self.mag.spacetime_mean (test_x = test_x, test_y = test_y, test_t = test_t,
weight_engine = self.weight_engine, test_z = test_z)
else:
# use the mag ninja to get the influence array and manually create vector mean (discard mean mag)
temp, influence = self.mag.spacetime_mean (test_x = test_x, test_y = test_y, test_t = test_t,
weight_engine = self.weight_engine, test_z = test_z)
# create weighted vector mean
az = np.nan
mag = np.nan
# redefine from np.nan if we are able to perform the calculation
if influence.shape[0] > 0:
influence_sum = influence.sum () # calc the sum of influence
if not np.isnan (influence_sum):
if influence_sum > 0.0:
na_mask = ~np.logical_or (np.isnan (self.az.vals), np.isnan (influence))
mask = np.logical_and ((influence > 0.0), na_mask)
az_masked = self.az.vals [mask]
mag_masked = self.mag.vals [mask]
influence_masked = influence [mask]
az_masked_radians = az_masked * np.pi / 180.0
xcp = mag_masked * np.sin (az_masked_radians)
ycp = mag_masked * np.cos (az_masked_radians)
xcp_mean = np.average (xcp, weights = influence_masked)
ycp_mean = np.average (ycp, weights = influence_masked)
az = degrees (atan2 (xcp_mean, ycp_mean))
az = az % 360.0
mag = sqrt (xcp_mean**2.0 + ycp_mean**2.0)
return (az, mag)
def assimilate (self, locs, scalar_mean = True, az_colname = 'az', mag_colname = 'mag'):
'''
method to assimilate for a pandas dataframe containing coordinates in space and time.
This is a useful method for making assimilations to view in paraview or qgis as flow glyphs
locs = pandas dataframe containing points where we need to create mean estimates for
must have 'x', 'y', 'z', 't', cols. Can be missing 'z' column if no z coords available.
scalar_mean = calculate the scalar mean (instead of the vector mean) - these two numbers
are rather different and you may choose one vs another for a given flow
assimilation task
        az_colname = the azimuth column name (for appending to existing dataframes)
mag_colname = the magnitude column name (for appending to existing dataframes)
returns the dataframe with az and mag columns
'''
locs [az_colname] = np.nan
locs [mag_colname] = np.nan
# calculate weighted means
for i in range (0, locs.shape[0]):
x = locs.loc [i, 'x']
y = locs.loc [i, 'y']
t = locs.loc [i, 't']
if 'z' in locs.columns:
z = locs.loc [i, 'z']
else:
z = None
# perform calculation and append to dataframe
az, mag = self.calc_mean (test_x = x, test_y = y, test_t = t, test_z = z,
scalar_mean = scalar_mean)
locs.loc[i, az_colname] = az
locs.loc[i, mag_colname] = mag
return (locs)
def repeat_sample (self, locs, az_colname = 'az', mag_colname = 'mag'):
'''
method to repeatedly pull samples from the flow ninja
locs = pandas dataframe containing points where we need to create mean estimates for
must have 'x', 'y', 'z', 't', cols. Can be missing 'z' column if no z coords available.
        az_colname = the azimuth column name (for appending to existing dataframes)
mag_colname = the magnitude column name (for appending to existing dataframes)
returns the dataframe with az and mag columns
'''
locs [az_colname] = np.nan
locs [mag_colname] = np.nan
# calculate the sample
for i in range (0, locs.shape[0]):
x = locs.loc [i, 'x']
y = locs.loc [i, 'y']
t = locs.loc [i, 't']
if 'z' in locs.columns:
z = locs.loc [i, 'z']
else:
z = None
# make a sample
az, mag = self.sample (test_x = x, test_y = y, test_t = t, test_z = z)
locs.loc[i, az_colname] = az
locs.loc[i, mag_colname] = mag
return (locs)
def timeslice (self, min_time, max_time):
'''
method to timeslice the number ninjas to limit the volume of numbers in the ninja
min_time = the minimum time to keep
max_time = the maximum time to keep
'''
self.az.timeslice (min_time, max_time)
self.mag.timeslice (min_time, max_time)
return
def add_data_file (self, filename, az_col = 'az', mag_col = 'mag'):
'''
        method to add data from a file. Note that this could be optimized for huge flows,
        as it presently reads the file twice when calling the underlying number ninja add_data_file
method. Also note all the important gotchas from the number_ninja add_data_file method. In
particular it is vital to note that this only appends data into the ninjas. It does not clear
the ninjas and replace the data.
filename = the filename to add data from
az_col = the azimuth column (defaults to az)
mag_col = the magnitude column (defaults to mag)
'''
self.az.add_data_file (filename, val_col = az_col)
self.mag.add_data_file (filename, val_col = mag_col)
return
def sort_in_time (self):
'''
method to sort the underlying ninjas in time. Note that this does an 'in place' sort, and will wipe
any context implied by ordering.
'''
self.az.sort_in_time ()
self.mag.sort_in_time ()
return
def write (self, filename):
'''
method to write out the record as a pandas dataframe
filename = filename to write out to
'''
if self.az.vals.shape[0] > 0:
df = pd.DataFrame (self.az.vals, columns = ['az'])
df['mag'] = self.mag.vals
if len (self.az.x) > 0:
df['x'] = self.az.x
if len (self.az.y) > 0:
df['y'] = self.az.y
if len (self.az.z) > 0:
df['z'] = self.az.z
if len (self.az.t) > 0:
df['t'] = self.az.t
df.to_csv (filename, index = False)
return
def deepcopy (self):
'''
method to deepcopy the individual number ninjas
returns a copy
'''
selfcopy = copy.copy (self)
selfcopy.az = self.az.deepcopy ()
selfcopy.mag = self.mag.deepcopy ()
return (selfcopy)
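# --- Illustrative usage sketch (not part of the original module) -------------
# A hedged, minimal example of the intended workflow: build a flow_ninja for
# wind data, append a couple of observations, and query a weighted mean at a
# point in space and time. It assumes number_ninja and weight_engine import
# cleanly as above; the coordinates and values are made up for illustration.
if __name__ == '__main__':
    import datetime
    demo = flow_ninja(is_wind=True, threadsafe=False)
    t_now = datetime.datetime.now()
    demo.add_data(az=270.0, mag=5.0, x=0.0, y=0.0, t=t_now)
    demo.add_data(az=260.0, mag=4.0, x=10.0, y=0.0, t=t_now)
    print(demo.calc_mean(test_x=5.0, test_y=0.0, test_t=t_now))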
|
mit
|
jblackburne/scikit-learn
|
examples/tree/plot_tree_regression.py
|
95
|
1516
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns the training
data in too fine a detail and fits the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
ucb-sejits/hpgmg
|
finite-element/hpgmg-analyze.py
|
2
|
3642
|
def parse_logfile(fname):
import re
FP = r'([+-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?)'
PERFLINE = []
PERFLINE.append(re.compile(r'Q2 G''\[([\d ]{4})([\d ]{4})([\d ]{4})\] P\[ *(\d+) +(\d+) +(\d+)\] '+FP+r' s +'+FP+r' GF +'+FP+r' MEq/s'))
PERFLINE.append(re.compile(r'Q2 G''\[([\d ]{5})([\d ]{5})([\d ]{5})\] P\[ *(\d+) +(\d+) +(\d+)\] '+FP+r' s +'+FP+r' GF +'+FP+r' MEq/s'))
HOSTLINE = re.compile(r'.*on a ([a-z\-_0-9]+) named [^ ]+ with (\d+) processors')
Dofs = []
GFlops = []
MEqs = []
Procs = None
HostName = 'unknown'
with open(fname) as f:
while 'Starting performance sampling' not in next(f):
pass
while True:
line = next(f)
for perfline in PERFLINE:
m = re.match(perfline,line)
if m: break
if not m: break
g0,g1,g2, p0,p1,p2, time, gflops, meqs = m.groups()
g = (float(g0)*2+1)*(float(g1)*2+1)*(float(g2)*2+1)
p = int(p0)*int(p1)*int(p2)
Dofs.append(g)
GFlops.append(float(gflops))
MEqs.append(float(meqs))
if Procs is None:
Procs = p
elif p != Procs:
raise RuntimeError('Procs varies within file "%s"' % (fname,))
while True:
line = next(f)
m = re.match(HOSTLINE,line)
if m:
HostName, p = m.groups()
assert int(p) == Procs
break
return Dofs, GFlops, MEqs, HostName, Procs
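# --- Illustrative sketch (not part of the original script) -------------------
# The FP pattern used above is a reusable "match one floating point number"
# regex. This optional helper (a new name, defined for illustration only and
# never called by the analyzer) shows the kinds of tokens it accepts.
def _demo_fp_regex():
    import re
    FP = r'([+-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?)'
    for token in ['42', '-3.5', '.25', '1.0e-3']:
        assert re.match(FP + r'$', token), token
    return True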
def plot(args):
symbols = iter(['ro', 'bv', 'ks', 'g^', 'bx'])
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
plt.title('HPGMG-FE Performance')
if args.perprocess:
plt.xlabel('Number of equations/process')
else:
plt.xlabel('Global number of equations')
ax2 = ax1.twinx()
#ax1.set_autoscaley_on(False)
ax1.set_ylabel('MEquations/second')
all_dofs = []
all_gflops = []
all_meqs = []
max_meqs = 0
for f in args.logfiles:
dofs, gflops, meqs, hostname, procs = parse_logfile(f)
if args.perprocess:
dofs = [d/procs for d in dofs]
all_dofs += dofs
all_gflops += gflops
all_meqs += meqs
if args.loglog:
ax1.loglog(dofs, meqs, next(symbols), label='%s np=%d'%(hostname, procs))
else:
ax1.semilogx(dofs, meqs, next(symbols), label='%s np=%d'%(hostname, procs))
flops_per_meqn = all_gflops[-1] / all_meqs[-1]
ax1.set_xlim(0.9*min(all_dofs),1.05*max(all_dofs))
ax2.set_xlim(0.9*min(all_dofs),1.05*max(all_dofs))
ax2.set_autoscaley_on(False)
if args.loglog:
ax2.set_yscale('log')
ax1.legend(loc='lower right')
else:
ax1.legend(loc='upper left')
ax1.set_ylim(0.9*min(all_meqs),1.1*max(all_meqs))
ax2.set_ylim(0.9*min(all_meqs)*flops_per_meqn,1.1*max(all_meqs)*flops_per_meqn)
ax2.set_ylabel('GFlop/s')
if args.output:
plt.savefig(args.output)
else:
plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser('FE-FAS Performance Analyzer')
parser.add_argument('-o', '--output', type=str, help='Output file')
parser.add_argument('--loglog', action='store_true', help='Use logarithmic y axis (x is always logarithmic)')
parser.add_argument('--perprocess', action='store_true', help='Use problem size per process for x axis')
parser.add_argument('logfiles', nargs='+', type=str, help='List of files to process, usually including -log_summary')
args = parser.parse_args()
plot(args)
|
bsd-2-clause
|
evgchz/scikit-learn
|
benchmarks/bench_covertype.py
|
14
|
7233
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
if "random_state" in estimator_params:
estimator.set_params(random_state=args["random_seed"])
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
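# Example invocation (for illustration; the flags are the ones defined by the
# argparse parser above):
#
#     python bench_covertype.py --classifiers liblinear SGD CART --n-jobs 4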
|
bsd-3-clause
|
mcdeaton13/dynamic
|
docs/sphinxext/tests/test_docscrape.py
|
12
|
14156
|
# -*- encoding:utf-8 -*-
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('shape=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
print doc['index']
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = [l for l in a.split('\n') if l.strip()]
b = [l for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
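# --- Illustrative sketch (not part of the original test module) --------------
# Beyond the fixtures above, the parsing API boils down to: feed a numpydoc
# style docstring to NumpyDocString and index the result like a dict. The
# docstring below is new and written only for this example.
def test_minimal_parameters_example():
    d = NumpyDocString("""
    add(a, b)

    Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.
    """)
    assert_equal(len(d['Parameters']), 2)
    assert_equal([n for n, _, _ in d['Parameters']], ['a', 'b'])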
|
mit
|
mgupta011235/TweetSafe
|
doc2vec/train_doc2vec_aws_day2.py
|
1
|
8256
|
from gensim.models import Doc2Vec
from gensim.models.doc2vec import LabeledSentence
import gensim.models.doc2vec
from nltk import download
from nltk.tokenize import PunktSentenceTokenizer
from nltk.corpus import stopwords
import pandas as pd
import cPickle as pickle
import sqlite3
import multiprocessing
import numpy as np
import time
def cleanString(incomingString):
    '''
    INPUT: string
    OUTPUT: string with bad chars removed
    '''
    # characters that get stripped out entirely (note '.' is kept so that
    # numbers such as 5.5 still parse as floats downstream)
    bad_chars = [",", "-", "!", "@", "#", "$", "%", "^", "&", "*", "(", ")",
                 "+", "=", "?", "'", "\"", "{", "}", "[", "]", "<", ">", "~",
                 "`", ":", ";", "|", "\\", "/"]
    newstring = incomingString
    for ch in bad_chars:
        newstring = newstring.replace(ch, "")
    return newstring
def tokenizePunc(incomingString):
    '''
    INPUT: string
    OUTPUT: string with spaces added around punctuation
    '''
    # per-character tokenization rules; the replacements reproduce the
    # original chain of str.replace calls exactly
    rules = [(".", " . "), (",", " , "), ("!", " !"), ("(", "( "), (")", " )"),
             ("?", " ? "), ("\"", " \" "), (":", " : "), (";", " ; "),
             ("*", " * "), ("+", " + "), ("=", " = "), ("{", " { "),
             ("}", " } "), ("[", " [ "), ("]", " ] "), ("<", " < "),
             (">", " > "), ("~", " ~ "), ("|", " | "), ("/", " / "),
             ("\\", " \\ ")]
    newstring = incomingString
    for ch, repl in rules:
        newstring = newstring.replace(ch, repl)
    return newstring
def text_cleaner(wordList):
'''
INPUT: list of words
    OUTPUT: list of tokenized words with numbers/links replaced by tags and unwanted substrings removed
'''
badSubStringList = ['[deleted]','&','/r/','/u/']
cleanedList = []
for word in wordList:
#if the word has a bad substring, dont add it to the output
if any(substring in word for substring in badSubStringList):
continue
#if the word is a number, replace it with a num tag
try:
newstring = cleanString(word) #5'10, --> 510
val = float(newstring)
cleanedList.append('NUMTAG')
continue
except:
pass
#if a word is a link, replace it with a link tag
if 'http://' in word:
cleanedList.append('LINKTAG')
continue
#tokenize puncuations and remove unwanted chars
newwords = tokenizePunc(word).split()
cleanedList.extend(newwords)
return cleanedList
def df_gen(df):
'''
Input: a pandas df
Output: this is a generator that gives the next row in the df
'''
numrows = len(df.index)
for row in xrange(numrows):
comment = df.iloc[row,:]
body = comment['body']
subreddit = str(comment['subreddit'])
# tokenize text
sentenceList = tokenizer.tokenize(body)
wordList = []
for sentence in sentenceList:
wordList.extend(sentence.split())
#clean text
body = text_cleaner(wordList)
# generate
# print "{}: {}".format(numrows,row)
# print "{}: {}".format(subreddit,body)
# print body
# print ""
yield LabeledSentence(body,tags=[subreddit])
def sql_gen(c):
'''
Input: sqlite3 cursor to a sqlite3 database
Output: this is a generator that gives the next query result from c
'''
# c is generated using the following code
# conn = sqlite3.connect(path2)
# c = conn.cursor()
# c.execute("SELECT subreddit, body FROM MAY2015")
for comment in c:
# try:
# subreddit = str(comment[0])
# body = comment[1]
# yield LabeledSentence(body,tags=[subreddit])
# except:
# yield None
subreddit = str(comment[0])
# tokenize text
sentenceList = tokenizer.tokenize(comment[1])
wordList = []
for sentence in sentenceList:
wordList.extend(sentence.split())
#clean text
body = text_cleaner(wordList)
# generate
yield LabeledSentence(body,tags=[subreddit])
def build_model(gen_obj):
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1
print "cores: ".format(cores)
d2v_reddit_model = Doc2Vec( dm=0,
size=300,
window=15,
negative=5,
hs=0,
min_count=2,
sample=1e-5,
workers=1)
# model below was used for testing script
# d2v_reddit_model = Doc2Vec( dm=0,
# size=3,
# window=3,
# workers=cores)
t_build_vocab_start = time.time()
print "building vocabulary..."
d2v_reddit_model.build_vocab(gen_obj)
t_build_vocab_stop = time.time()
t_train_model_start = time.time()
print "training model..."
for epoch in xrange(20):
print "epoch: {}".format(epoch)
d2v_reddit_model.train(gen_obj)
d2v_reddit_model.alpha -= 0.002 # decrease the learning rate
d2v_reddit_model.min_alpha = d2v_reddit_model.alpha # fix the learning rate, no decay
t_train_model_stop = time.time()
print "build vocab: {}".format(t_build_vocab_stop - t_build_vocab_start)
print "train model: {}".format(t_train_model_stop - t_train_model_start)
return d2v_reddit_model
if __name__ == '__main__':
print "starting..."
# print "downloading all corpora from nltk..."
# download('all-corpora')
# stopwords = stopwords.words('english')
tokenizer = PunktSentenceTokenizer()
path1 = 'labeledRedditComments.p'
path2 = '../../data/RedditMay2015Comments.sqlite'
print "loading dataframe..."
t_load_df_start = time.time()
df = pickle.load(open(path1, 'rb'))
t_load_df_stop = time.time()
#select random rows to create a random df matrix
randRows = np.random.randint(low=0,high=len(df.index),size=(200000,1))
rows = [int(row) for row in randRows]
dfsmall = df.ix[rows,:]
print "creating generator..."
mygen = df_gen(dfsmall)
# print "connecting to sql database..."
# conn = sqlite3.connect(path2)
# c = conn.cursor()
# c.execute("SELECT subreddit, body FROM MAY2015")
# mygen = sql_gen(c)
t_build_model_start = time.time()
print "building model..."
model = build_model(mygen)
t_build_model_stop = time.time()
print "load df: {}".format(t_load_df_stop - t_load_df_start)
print "build model: {}".format(t_build_model_stop - t_build_model_start)
print "saving model..."
model.save('my_model.doc2vec')
|
gpl-3.0
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/matplotlib/gridspec.py
|
10
|
15668
|
"""
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
specifies the geometry of the grid that a subplot will be
placed in. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
"""
A base class of GridSpec that specifies the geometry of the grid
that a subplot will be placed in.
"""
def __init__(self, nrows, ncols,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. Optionally, the ratio of heights and widths of rows and
columns can be specified.
"""
#self.figure = figure
self._nrows , self._ncols = nrows, ncols
self.set_height_ratios(height_ratios)
self.set_width_ratios(width_ratios)
def get_geometry(self):
'get the geometry of the grid, e.g., 2,3'
return self._nrows, self._ncols
def get_subplot_params(self, fig=None):
pass
def new_subplotspec(self, loc, rowspan=1, colspan=1):
"""
create and return a SubplotSpec instance.
"""
loc1, loc2 = loc
subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
return subplotspec
def set_width_ratios(self, width_ratios):
self._col_width_ratios = width_ratios
def get_width_ratios(self):
return self._col_width_ratios
def set_height_ratios(self, height_ratios):
self._row_height_ratios = height_ratios
def get_height_ratios(self):
return self._row_height_ratios
def get_grid_positions(self, fig):
"""
return lists of bottom and top position of rows, left and
right positions of columns.
"""
nrows, ncols = self.get_geometry()
subplot_params = self.get_subplot_params(fig)
left = subplot_params.left
right = subplot_params.right
bottom = subplot_params.bottom
top = subplot_params.top
wspace = subplot_params.wspace
hspace = subplot_params.hspace
totWidth = right-left
totHeight = top-bottom
# calculate accumulated heights of rows
cellH = totHeight/(nrows + hspace*(nrows-1))
sepH = hspace*cellH
if self._row_height_ratios is not None:
netHeight = cellH * nrows
tr = float(sum(self._row_height_ratios))
cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
else:
cellHeights = [cellH] * nrows
sepHeights = [0] + ([sepH] * (nrows-1))
cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))
# calculate accumulated widths of columns
cellW = totWidth/(ncols + wspace*(ncols-1))
sepW = wspace*cellW
if self._col_width_ratios is not None:
netWidth = cellW * ncols
tr = float(sum(self._col_width_ratios))
cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
else:
cellWidths = [cellW] * ncols
sepWidths = [0] + ([sepW] * (ncols-1))
cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]
return figBottoms, figTops, figLefts, figRights
def __getitem__(self, key):
"""
create and return a SubplotSpec instance.
"""
nrows, ncols = self.get_geometry()
total = nrows*ncols
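# Keys may be a (row, col) tuple of ints and/or slices, or a single flattened
# index or slice; either way the result is reduced to the flattened corner
# indices num1 and num2 of the spanned cells.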
if isinstance(key, tuple):
try:
k1, k2 = key
except ValueError:
raise ValueError("unrecognized subplot spec")
if isinstance(k1, slice):
row1, row2, _ = k1.indices(nrows)
else:
if k1 < 0:
k1 += nrows
if k1 >= nrows or k1 < 0 :
raise IndexError("index out of range")
row1, row2 = k1, k1+1
if isinstance(k2, slice):
col1, col2, _ = k2.indices(ncols)
else:
if k2 < 0:
k2 += ncols
if k2 >= ncols or k2 < 0 :
raise IndexError("index out of range")
col1, col2 = k2, k2+1
num1 = row1*ncols + col1
num2 = (row2-1)*ncols + (col2-1)
# single key
else:
if isinstance(key, slice):
num1, num2, _ = key.indices(total)
num2 -= 1
else:
if key < 0:
key += total
if key >= total or key < 0 :
raise IndexError("index out of range")
num1, num2 = key, None
return SubplotSpec(self, num1, num2)
class GridSpec(GridSpecBase):
"""
A class that specifies the geometry of the grid that a subplot
will be placed in. The location of the grid is determined in a
similar way to SubplotParams.
"""
def __init__(self, nrows, ncols,
left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None):
"""
The number of rows and number of columns of the
grid need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
"""
#self.figure = figure
self.left=left
self.bottom=bottom
self.right=right
self.top=top
self.wspace=wspace
self.hspace=hspace
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
#self.set_width_ratios(width_ratios)
#self.set_height_ratios(height_ratios)
_AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
def update(self, **kwargs):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc.
"""
for k, v in six.iteritems(kwargs):
if k in self._AllowedKeys:
setattr(self, k, v)
else:
raise AttributeError("%s is unknown keyword" % (k,))
from matplotlib import _pylab_helpers
from matplotlib.axes import SubplotBase
for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
for ax in figmanager.canvas.figure.axes:
# copied from Figure.subplots_adjust
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
if ax._sharex.get_subplotspec().get_gridspec() == self:
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
if ax._sharey.get_subplotspec().get_gridspec() == self:
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ss = ax.get_subplotspec().get_topmost_subplotspec()
if ss.get_gridspec() == self:
ax.update_params()
ax.set_position(ax.figbox)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters. The default
parameters are from rcParams unless a figure attribute is set.
"""
from matplotlib.figure import SubplotParams
import copy
if fig is None:
kw = dict([(k, rcParams["figure.subplot."+k]) \
for k in self._AllowedKeys])
subplotpars = SubplotParams(**kw)
else:
subplotpars = copy.copy(fig.subplotpars)
update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
subplotpars.update(**update_kw)
return subplotpars
def locally_modified_subplot_params(self):
return [k for k in self._AllowedKeys if getattr(self, k)]
def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_subplotspec_list,
get_tight_layout_figure,
get_renderer)
subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(fig)
kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect,
)
self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
"""
GridSpec whose subplot layout parameters are inherited from the
location specified by a given SubplotSpec.
"""
def __init__(self, nrows, ncols,
subplot_spec,
wspace=None, hspace=None,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. An instance of SubplotSpec is also required, from which
the layout parameters will be inherited. The wspace
and hspace of the layout can be optionally specified or the
default values (from the figure or rcParams) will be used.
"""
self._wspace=wspace
self._hspace=hspace
self._subplot_spec = subplot_spec
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters.
"""
if fig is None:
hspace = rcParams["figure.subplot.hspace"]
wspace = rcParams["figure.subplot.wspace"]
else:
hspace = fig.subplotpars.hspace
wspace = fig.subplotpars.wspace
if self._hspace is not None:
hspace = self._hspace
if self._wspace is not None:
wspace = self._wspace
figbox = self._subplot_spec.get_position(fig, return_all=False)
left, bottom, right, top = figbox.extents
from matplotlib.figure import SubplotParams
sp = SubplotParams(left=left,
right=right,
bottom=bottom,
top=top,
wspace=wspace,
hspace=hspace)
return sp
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec(object):
"""
specifies the location of the subplot in the given *GridSpec*.
"""
def __init__(self, gridspec, num1, num2=None):
"""
The subplot will occupy the num1-th cell of the given
gridspec. If num2 is provided, the subplot will span between
num1-th cell and num2-th cell.
The index starts from 0.
"""
rows, cols = gridspec.get_geometry()
total = rows*cols
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
"""
get the subplot geometry, e.g., 2,2,3. Unlike SubplotParams,
the index is 0-based
"""
rows, cols = self.get_gridspec().get_geometry()
return rows, cols, self.num1, self.num2
def get_position(self, fig, return_all=False):
"""
update the subplot position from fig.subplotpars
"""
gridspec = self.get_gridspec()
nrows, ncols = gridspec.get_geometry()
figBottoms, figTops, figLefts, figRights = \
gridspec.get_grid_positions(fig)
rowNum, colNum = divmod(self.num1, ncols)
figBottom = figBottoms[rowNum]
figTop = figTops[rowNum]
figLeft = figLefts[colNum]
figRight = figRights[colNum]
if self.num2 is not None:
rowNum2, colNum2 = divmod(self.num2, ncols)
figBottom2 = figBottoms[rowNum2]
figTop2 = figTops[rowNum2]
figLeft2 = figLefts[colNum2]
figRight2 = figRights[colNum2]
figBottom = min(figBottom, figBottom2)
figLeft = min(figLeft, figLeft2)
figTop = max(figTop, figTop2)
figRight = max(figRight, figRight2)
figbox = mtransforms.Bbox.from_extents(figLeft, figBottom,
figRight, figTop)
if return_all:
return figbox, rowNum, colNum, nrows, ncols
else:
return figbox
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
gridspec = self.get_gridspec()
if hasattr(gridspec, "get_topmost_subplotspec"):
return gridspec.get_topmost_subplotspec()
else:
return self
def __eq__(self, other):
# check to make sure other has the attributes
# we need to do the comparison
if not (hasattr(other, '_gridspec') and
hasattr(other, 'num1') and
hasattr(other, 'num2')):
return False
return all((self._gridspec == other._gridspec,
self.num1 == other.num1,
self.num2 == other.num2))
def __hash__(self):
return (hash(self._gridspec) ^
hash(self.num1) ^
hash(self.num2))
|
gpl-2.0
|
bbatha/csc200
|
brachistochrone/brachistochrone.py
|
1
|
5249
|
#!/usr/bin/env python
from __future__ import division
from random import Random
from time import time
from math import cos
from math import sqrt
from math import sin
from math import radians
import matplotlib.pyplot as plt
num_intervals = 100
def fitness(candidates):
best = []
fit = 9999999999999999
dur = 0
for cs in candidates:
dur = duration(cs)
if dur < fit:
fit = dur
best = cs
return best
def find_optimal(initial, num_iter, rand):
optimal = initial[:]
for i in range(num_iter):
candidates = generate_candidates(optimal, rand)
optimal = fitness(candidates)
return optimal
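# Each candidate perturbs one interior point downwards by a random amount and
# spreads the same offset over up to seven neighbours on either side so the
# curve stays smooth; negative heights are clipped back to zero.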
def generate_candidates(initial, rand):
candidates = [initial]
intermediate = []
for i in range(1,num_intervals):
intermediate = initial[:]
smooth = rand.uniform(0, initial[i-1])
for j in range(8):
if j == 0:
intermediate[i] -= smooth
continue
if i - j > 1:
intermediate[i - j] -= smooth
if i + j < num_intervals - 1:
intermediate[i + j] -= smooth
intermediate = [i if i >= 0.0 else 0.0 for i in intermediate]
candidates.append(intermediate)
return candidates
#From
# M. Borschbach and W. Dreckman. On the Role of an Evolutionary Solution for the
# Brachistochrone-Problem. IEEE Congress on Evolutionary Computation:2188-2193. 2007.
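# Each segment's travel time is approximated as ds / v_avg, where the speed at
# drop height h = 1 - y follows from energy conservation, v = sqrt(2*g*h), and
# v_avg is the mean of the speeds at the two segment endpoints.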
def duration(candidate):
g = 9.81
d = 0.0
h = 0.0
v = 0.0
t_total = 0.0
time = 0.0
x = [(float(i) / num_intervals) for i in range(num_intervals)]
x.append(1.0)
x.reverse()
for i in range(0,len(candidate)-1):
t = sqrt((x[i+1] - x[i]) ** 2 + (candidate[i+1] - candidate[i])**2)
t /= sqrt(1.0 - candidate[i]) + sqrt(1.0 - candidate[i+1])
t *= 2.0/sqrt(2.0*9.81)
t_total += t
return t_total
def frange(x, y, step):
while x + 0.000000005 < y:
yield x
x += step
def actual(a):
x = []
y = []
for theta in frange(0.0, 90.0, 90.0/101):
theta = radians(theta)
x.append(a/2.0 * (theta - sin(theta)))
y.append(a/2.0 * (1.0 - cos(theta)))
return x, y
def brachistochrone():
initial = [(float(i) / num_intervals) for i in range(num_intervals)]
initial.append(1.0)
initial.reverse()
rand = Random()
rand.seed(int(time()))
real = initial[:]
k = find_optimal(initial[:], 1000, rand)
print duration(k)
tenk = find_optimal(k[:], 10000, rand)
print duration(tenk)
#hundk = find_optimal(tenk[:], 100000, rand)
#print duration(hundk)
initial.reverse()
plt.plot(initial, k, '-', lw=2)
plt.plot(initial, tenk, '-', lw=2)
#plt.plot(initial, hundk, '-', lw=2)
plt.plot(initial, initial, '-', lw=2)
real = actual(2.0)
initial.reverse()
plt.plot(initial, real[1], '-', lw=2)
print duration(real[1])
print duration(initial)
plt.title('Brachistochrone')
plt.grid(True)
plt.show()
if __name__ == '__main__':
brachistochrone()
#After 10000 populations the result was 0.59s
#[1.0, 0.9554142019493003, 0.9064481423601488, 0.8840591642179618,
#0.8593001269269679, 0.8413300455037984, 0.8194133482738329, 0.796515698769301,
#0.7781990313228083, 0.7710949773566217, 0.7531046934610919, 0.73550810146929,
#0.7178017980481116, 0.7003887579985918, 0.6868605101474995, 0.6712463133586177,
#0.6580053708617286, 0.6434829629676455, 0.6313507071812814, 0.61873052571968,
#0.6055098760455644, 0.5928753200297969, 0.5806140190503736, 0.5703183251330384,
#0.5666449673412327, 0.5563184999745053, 0.5455015028219167, 0.535174861815575,
#0.5241287336023774, 0.5165222953771171, 0.5056708906516544, 0.4958321972071281,
#0.4849945455701746, 0.47530807208275083, 0.4673796778005949, 0.4568634606759744,
#0.4472643672583676, 0.4381811132518001, 0.42748595854577887,
#0.42186585599782794, 0.41358163352748184, 0.4029972628521903,
#0.39516735135562175, 0.38591023983789435, 0.3774534976282572,
#0.3672530093512634, 0.3591207372914839, 0.34888482168001494,
#0.33975499689678085, 0.33191714536959416, 0.325241247038768, 0.3178531707739997,
#0.3094945740494273, 0.2986756974041093, 0.2952015454713591, 0.2863632813063608,
#0.2801607295477259, 0.27199006138838333, 0.2622283800758324, 0.2556442656343049,
#0.2468901724607104, 0.24077862930089103, 0.23263699288809134,
#0.22596313542241447, 0.21798005495006645, 0.209446400523037,
#0.20209999679642346, 0.19504101388663567, 0.187533783621952, 0.1834843997931665,
#0.17599245337995206, 0.16831010131415083, 0.16107741569142187,
#0.15467548012492882, 0.1493976946582648, 0.14262900463907646,
#0.13580331682812874, 0.12855176814038677, 0.12169293020743852,
#0.11704067235760217, 0.11022421226420229, 0.10392401617583326,
#0.09793313424945715, 0.0908110181394189, 0.08632390909135426,
#0.07925509340217016, 0.07333766338334304, 0.06794629504540615,
#0.06084083846725948, 0.055683066131442464, 0.05005748559969681,
#0.04340654671502322, 0.03709088741738346, 0.03396111501521751,
#0.029741917899339998, 0.023474878972501164, 0.01865990171559162,
#0.014310567766850916, 0.00906630685673991, 0.005802267363736822, 0.0]
|
bsd-2-clause
|
mlyundin/scikit-learn
|
examples/linear_model/plot_robust_fit.py
|
238
|
2414
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
bsd-3-clause
|
richardseifert/Hydra_pipeline
|
libs/calib.py
|
1
|
4036
|
from fitstools import manage_dtype, mask_fits, assign_header
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import cosmics
def calibrate(image, bias, fiber_mask=None, lacosmic=True):
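# Standard reduction chain: bias subtraction, dark subtraction, bad-pixel
# masking and, optionally, cosmic-ray removal via the cosmics (L.A.Cosmic) module.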
image = bias_correct(image, bias, fiber_mask)
image = dark_correct(image)
image = mask_badpixels(image)
if lacosmic:
image = remove_cosmics(image)
return image
def bias_correct(image, bias, fiber_mask=None):
@manage_dtype(preserve=True)
def bc_helper(image, bias, fiber_mask=None):
#reduced = (image-median(masked image)) - (bias-median(bias))
if type(fiber_mask) != type(None):
masked_image = mask_fits(image, fiber_mask, maskval=0, fillval=np.nan)
image = (image - np.nanmedian(masked_image)) - (bias - np.median(bias))
else:
image = image - bias
return image
bias_subtracted_image = bc_helper(image, bias, fiber_mask)
if type(bias_subtracted_image) == fits.hdu.hdulist.HDUList:
bias_subtracted_image = assign_header(bias_subtracted_image, image[0].header)
bias_subtracted_image[0].header['COMMENT'] = 'Bias corrected.'
return bias_subtracted_image
def dark_correct(image, exptime=None):
dark_map = fits.open('calib/master_calib/dark_fit.fits')[0].data
header = image[0].header
gain = header['GAIN']
dark_map /= gain
if exptime == None and type(image) == fits.hdu.hdulist.HDUList:
try:
exptime = image[0].header['EXPTIME']
except KeyError:
exptime = 0.0
elif exptime is None:
raise ValueError('Cannot determine exposure time for dark subtraction.')
@manage_dtype(preserve=True)
def dc_helper(image, dark_map, exptime):
image = image - exptime*dark_map
return image
dark_subtracted_image = dc_helper(image, dark_map, exptime)
if type(dark_subtracted_image) == fits.hdu.hdulist.HDUList:
dark_subtracted_image = assign_header(dark_subtracted_image, image[0].header)
dark_subtracted_image[0].header['COMMENT'] = 'Dark corrected.'
return dark_subtracted_image
def mask_badpixels(image):
bad_mask = fits.open('calib/master_calib/badmask.fits')
@manage_dtype(preserve=True)
def mbp_helper(image, bad_mask):
image = mask_fits(image, bad_mask, maskval=1.0, fillval=np.nan)
return image
bad_masked_image = mbp_helper(image, bad_mask)
if type(bad_masked_image) == fits.hdu.hdulist.HDUList:
bad_masked_image = assign_header(bad_masked_image, image[0].header)
bad_masked_image[0].header['COMMENT'] = 'Bad pixels masked.'
return bad_masked_image
@manage_dtype(preserve=True, use_args=[0], with_header=True)
def remove_cosmics(image, gain=None, readnoise=None, sigclip=5.0, sigfrac=0.5, objlim=20.0):
image, header = image[:2]
fname_noext = header['FILENAME'][:-5] if 'FILENAME' in header else 'image'
if gain==None and 'GAIN' in header:
gain = header['GAIN']
if readnoise==None and 'RDNOISE' in header:
readnoise = header['RDNOISE']
if gain==None:
raise KeyError('Cannot determine image gain from information given.')
if readnoise==None:
raise KeyError('Cannot determine image readnoise from information given.')
c = cosmics.cosmicsimage(image, gain=gain, readnoise=readnoise, sigclip=sigclip, sigfrac=sigfrac, objlim=objlim, verbose=False)
c.run(maxiter=5)
cosmics_mask = c.mask
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_cmask.fits', np.transpose(cosmics_mask), header)
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_before.fits', np.transpose(image), header)
cosmics_masked_image = mask_fits(image, cosmics_mask, maskval=0.0, fillval=np.nan)
#cosmics.tofits('plots/lacosmic/'+fname_noext+'_after.fits', np.transpose(cosmics_masked_image), header)
if type(cosmics_masked_image) == fits.hdu.hdulist.HDUList:
cosmics_masked_image = assign_header(cosmics_masked_image, image[0].header)
cosmics_masked_image[0].header['COMMENT'] = 'Cosmic rays masked.'
return cosmics_masked_image,header
|
mit
|
cainiaocome/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
mdhaber/scipy
|
scipy/misc/common.py
|
20
|
9678
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import arange, newaxis, hstack, prod, array, frombuffer, load
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Returns
-------
w : ndarray
Weights for an Np-point central derivative. Its size is `Np`.
Notes
-----
Can be inaccurate for a large number of points.
Examples
--------
We can calculate a derivative value of a function.
>>> from scipy.misc import central_diff_weights
>>> def f(x):
... return 2 * x**2 + 3
>>> x = 3.0 # derivative point
>>> h = 0.1 # differential step
>>> Np = 3 # point number for central derivative
>>> weights = central_diff_weights(Np) # weights for first derivative
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
>>> sum(w * v for (w, v) in zip(weights, vals))/h
11.79999999999998
This value is close to the analytical solution:
f'(x) = 4x, so f'(3) = 12
References
----------
.. [1] https://en.wikipedia.org/wiki/Finite_difference
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = prod(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the nth derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the nth derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which the nth derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / prod((dx,)*n,axis=0)
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = frombuffer(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
def electrocardiogram():
"""
Load an electrocardiogram as an example for a 1-D signal.
The returned signal is a 5 minute long electrocardiogram (ECG), a medical
recording of the heart's electrical activity, sampled at 360 Hz.
Returns
-------
ecg : ndarray
The electrocardiogram in millivolt (mV) sampled at 360 Hz.
Notes
-----
The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
(lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
heartbeats as well as pathological changes.
.. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
.. versionadded:: 1.1.0
References
----------
.. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
(PMID: 11446209); :doi:`10.13026/C2F305`
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource
for Complex Physiologic Signals. Circulation 101(23):e215-e220;
:doi:`10.1161/01.CIR.101.23.e215`
Examples
--------
>>> from scipy.misc import electrocardiogram
>>> ecg = electrocardiogram()
>>> ecg
array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
>>> ecg.shape, ecg.mean(), ecg.std()
((108000,), -0.16510875, 0.5992473991177294)
As stated the signal features several areas with a different morphology.
E.g., the first few seconds show the electrical activity of a heart in
normal sinus rhythm as seen below.
>>> import matplotlib.pyplot as plt
>>> fs = 360
>>> time = np.arange(ecg.size) / fs
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(9, 10.2)
>>> plt.ylim(-1, 1.5)
>>> plt.show()
After second 16, however, the first premature ventricular contractions, also
called extrasystoles, appear. These have a different morphology compared to
typical heartbeats. The difference can easily be observed in the following
plot.
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(46.5, 50)
>>> plt.ylim(-2, 1.5)
>>> plt.show()
At several points large artifacts disturb the recording, e.g.:
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(207, 215)
>>> plt.ylim(-2, 3.5)
>>> plt.show()
Finally, examining the power spectrum reveals that most of the biosignal is
made up of lower frequencies. At 60 Hz the noise induced by the mains
electricity can be clearly observed.
>>> from scipy.signal import welch
>>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
>>> plt.semilogy(f, Pxx)
>>> plt.xlabel("Frequency in Hz")
>>> plt.ylabel("Power spectrum of the ECG in mV**2")
>>> plt.xlim(f[[0, -1]])
>>> plt.show()
"""
import os
file_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
with load(file_path) as file:
ecg = file["ecg"].astype(int) # np.uint16 -> int
# Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
ecg = (ecg - 1024) / 200.0
return ecg
|
bsd-3-clause
|
gsmaxwell/phase_offset_rx
|
gnuradio-core/src/examples/pfb/channelize.py
|
17
|
6621
|
#!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = gr.firdes.low_pass_2(1, self._fs, 475.50, 50,
attenuation_dB=100, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = gr.add_cc()
freqs = [-4070, -3050, -2030, -1010, 10, 1020, 2040, 3060, 4080]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
arjoly/scikit-learn
|
examples/applications/plot_tomography_l1_reconstruction.py
|
81
|
5461
|
"""
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
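# _weights spreads each projected coordinate over its two neighbouring detector
# bins with linear-interpolation weights (1 - alpha, alpha).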
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
|
bsd-3-clause
|
jungla/ICOM-fluidity-toolbox
|
Detectors/offline_advection/plot_dispersion_Tr_2.py
|
1
|
1576
|
#!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
import os
import fio
import csv
import lagrangian_stats
label = 'm_25_2_512'
#label = 'm_25_1_particles'
dayi = 0 #10*24*2
dayf = 269 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
time = np.asarray(range(dayi,dayf,days))
timeTr = (time)*1200 + 48*3600 - 1200
depths = [1,5,17]
Trid = [1,2,4]
D_Tr = np.zeros([len(time),len(depths)])
# Tracer second moment
for z in range(len(depths)):
print z
f0 = open('D_Tracer_'+str(Trid[z])+'_CG_'+label+'.csv','r')
r0 = csv.reader(f0)
vals = []
for row in r0:
time,val = row[0].split(', ')
vals.append(float(val))
D_Tr[:,z] = np.asarray(vals[dayi:dayf:days])
f0.close()
# plot dispersion Tr
nl = len(depths)
pTr, = plt.plot(timeTr/3600.,D_Tr[:,0],color=[0,0,0],linewidth=2)
z = 1
pTr5, = plt.plot(timeTr/3600.,D_Tr[:,z],color=[z/float(nl),z/float(nl),z/float(nl)],linewidth=2)
z = 2
pTr17, = plt.plot(timeTr/3600.,D_Tr[:,z],color=[z/float(nl),z/float(nl),z/float(nl)],linewidth=2)
plt.gca().set_yscale('log')
#plt.gca().set_xscale('log')
plt.xlabel('Time [hours]')
plt.xlim([48, 140])
plt.ylabel('Dispersion [m^2]')
plt.legend((pTr,pTr5,pTr17),('Tr 1m','Tr 5m','Tr 17m'),loc=4,fontsize=12)
plt.savefig('./plot/'+label+'/D_Tr_'+label+'.eps')
print './plot/'+label+'/D_Tr_'+label+'.eps'
plt.close()
|
gpl-2.0
|
anthrotype/freetype-py
|
examples/glyph-vector.py
|
3
|
2915
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face('./Vera.ttf')
face.set_char_size( 48*64 )
face.load_char('S')
slot = face.glyph
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
figure = plt.figure(figsize=(8,10))
axis = figure.add_subplot(111)
#axis.scatter(points['x'], points['y'], alpha=.25)
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph lines
path = Path(VERTS, CODES)
glyph = patches.PathPatch(path, facecolor='.75', lw=1)
# Draw "control" lines
for i, code in enumerate(CODES):
if code == Path.CURVE3:
CODES[i] = Path.LINETO
path = Path(VERTS, CODES)
patch = patches.PathPatch(path, ec='.5', fill=False, ls='dashed', lw=1 )
axis.add_patch(patch)
axis.add_patch(glyph)
axis.set_xlim(x.min()-100, x.max()+100)
plt.xticks([])
axis.set_ylim(y.min()-100, y.max()+100)
plt.yticks([])
plt.show()
|
bsd-3-clause
|
yipenggao/moose
|
python/peacock/PostprocessorViewer/plugins/PostprocessorSelectPlugin.py
|
4
|
6624
|
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import itertools
from PyQt5 import QtCore, QtWidgets
from PostprocessorPlugin import PostprocessorPlugin
from LineGroupWidget import LineGroupWidget
import mooseutils
class PostprocessorSelectPlugin(QtWidgets.QWidget, PostprocessorPlugin):
"""
Widget that contains the toggles for plotting the individual postprocessor data.
This builds a scrollable box containing LineGroupWidget objects, these toggles control the visibility
and style of the postprocessor line plots.
"""
#: pyqtSignal: Emitted when plot is refreshed, contains the x/y/y2 axis variable names
variablesChanged = QtCore.pyqtSignal(list, list, list)
#: pyqtSignal: Emitted when the LineGroupWidgets change the plot.
axesModified = QtCore.pyqtSignal()
def __init__(self):
super(PostprocessorSelectPlugin, self).__init__()
# Setup this widget
policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
policy.setVerticalStretch(100) # We want this widget to be as big as possible vertically
self.setSizePolicy(policy)
# An iteratable color cycle for setting the default line style and color
self.color_cycle = None
# Member variables
self._groups = [] # list of ListGroupWidget objects
# The box and layout that will contain the line toggles
self.LineGroups = QtWidgets.QFrame()
self.LineGroupsLayout = QtWidgets.QVBoxLayout()
self.LineGroupsLayout.setSpacing(10);
self.LineGroupsLayout.setContentsMargins(0, 10, 10, 0);
self.LineGroups.setLayout(self.LineGroupsLayout)
# Creates the area that will be scrollable
self.Scroll = QtWidgets.QScrollArea()
self.Scroll.setWidget(self.LineGroups)
# Main layout to contain the scroll area
self.MainLayout = QtWidgets.QVBoxLayout()
self.MainLayout.setContentsMargins(0, 10, 0, 10)
self.MainLayout.addWidget(self.Scroll)
self.setLayout(self.MainLayout)
# Call the setup methods
self.setup()
def onSetData(self, data):
"""
Called when new data is being supplied to the widget.
Args:
data[list]: A list of PostprocessorDataWidget files.
"""
# Remove existing widgets
current_groups = {}
filenames = [d.filename() for d in data]
for group in self._groups:
group.clear()
if group.filename() not in filenames:
self.LineGroupsLayout.removeWidget(group)
group.setParent(None)
group.disconnect()
else:
current_groups[group.filename()] = group
self._groups = []
self.color_cycle = itertools.product(['-', '--', '-.', ':'], plt.cm.Paired(np.linspace(0, 1, 11)))
# Create the group widgets for each available variable
for d in data:
if d.filename() in current_groups and not current_groups[d.filename()].sameData(d):
group = current_groups[d.filename()]
self.LineGroupsLayout.removeWidget(group)
group.setParent(None)
group.disconnect()
self._newGroup(d)
elif d.filename() in current_groups:
group = current_groups[d.filename()]
group.setData(self.axes(), d)
self._groups.append(group)
self.updateVariables()
else:
self._newGroup(d)
self.updateGeometry()
def _newGroup(self, d):
group = LineGroupWidget(self.axes(), d, self.color_cycle)
self.LineGroupsLayout.addWidget(group)
self._groups.append(group)
group.initialized.connect(self.updateGeometry)
group.variablesChanged.connect(self.updateVariables)
group.axesModified.connect(self.axesModified)
def onTimeChanged(self, time):
"""
Update the time in the GroupLineWidgets.
"""
for group in self._groups:
group.plot(time=time)
def onCurrentChanged(self, index):
"""
Enables/disables the update timer base on the active state of the tab.
"""
active = self._index == index
for group in self._groups:
group._data.setTimerActive(active)
@QtCore.pyqtSlot()
def updateVariables(self):
"""
Updates the complete list of active variables for x/y axis labels.
"""
n = len(self._groups)
x_vars = [[]]*n
y_vars = [[]]*n
y2_vars = [[]]*n
for i in range(n):
group = self._groups[i]
if group.isValid():
x, y, y2 = group.getAxisLabels()
x_vars[i] = [x]
y_vars[i] = y
y2_vars[i] = y2
self.variablesChanged.emit(x_vars, y_vars, y2_vars)
def repr(self):
"""
Produce the script items for this widget.
"""
output = []
imports = []
for group in self._groups:
out, imp = group.repr()
output += out
imports += imp
return output, imports
def _setupScroll(self, qobject):
"""
Setup method for the scroll area widget.
"""
qobject.setWidgetResizable(True)
qobject.setFrameShape(QtWidgets.QFrame.NoFrame)
qobject.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def _setupLineToggleGroupBox(self, qobject):
"""
Setup method for the group box storing the line toggle widgets.
"""
qobject.setAutoFillBackground(True)
def main(filenames, reader=mooseutils.VectorPostprocessorReader):
"""
Create widgets for running PostprocessorSelectPlugin
"""
"""
Run FigurePlugin by itself.
"""
from peacock.PostprocessorViewer.PostprocessorViewer import PostprocessorViewer
from FigurePlugin import FigurePlugin
widget = PostprocessorViewer(reader, timeout=None, plugins=[FigurePlugin, PostprocessorSelectPlugin])
widget.onSetFilenames(filenames)
control = widget.currentWidget().PostprocessorSelectPlugin
window = widget.currentWidget().FigurePlugin
window.setFixedSize(QtCore.QSize(625, 625))
widget.show()
return control, widget, window
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
filenames = ['../../../tests/input/vpp_*.csv']
_, widget, _ = main(filenames)
app.exec_()
os.remove('tmp_001.csv')
|
lgpl-2.1
|
robcarver17/pysystemtrade
|
sysquant/returns.py
|
1
|
10116
|
import pandas as pd
import numpy as np
from systems.accounts.curves.account_curve_group import accountCurveGroup
from syscore.genutils import flatten_list
from syscore.dateutils import ROOT_BDAYS_INYEAR
from syscore.pdutils import listOfDataFrames
SINGLE_NAME= "asset"
class dictOfSR(dict):
def apply_cost_multiplier(self,
cost_multiplier: float = 1.0) -> 'dictOfSR':
column_names = list(self.keys())
multiplied_dict_of_cost_SR = dict([
(column,
self[column] * cost_multiplier
) for column in column_names
])
multiplied_dict_of_cost_SR = dictOfSR(multiplied_dict_of_cost_SR)
return multiplied_dict_of_cost_SR
class dictOfSRacrossAssets(dict):
def get_pooled_SR(self, asset_name) -> dictOfSR:
column_names = self.get_column_names_for_asset(asset_name)
column_SR_dict = dict([
(column, self.get_avg_SR_for_column_name_across_dict(column))
for column in column_names])
column_SR_dict = dictOfSR(column_SR_dict)
return column_SR_dict
def get_avg_SR_for_column_name_across_dict(self, column: str) -> float:
list_of_SR = [dict_for_asset[column] for dict_for_asset in self.values()]
avg_SR = np.mean(list_of_SR)
return avg_SR
def get_column_names_for_asset(self, asset_name) -> list:
return list(self.get_SR_dict_for_asset(asset_name).keys())
def get_SR_dict_for_asset(self, asset_name) -> dictOfSR:
return self[asset_name]
class returnsForOptimisation(pd.DataFrame):
def __init__(self, *args, frequency: str = "W", pooled_length: int = 1, **kwargs):
super().__init__(*args, **kwargs)
self._frequency = frequency
self._pooled_length = pooled_length
@property
def frequency(self):
return self._frequency
@property
def pooled_length(self):
return self._pooled_length
class dictOfReturnsForOptimisation(dict):
def get_column_names(self) -> list:
## all names should match so shouldn't matter
column_names = list(self.values())[0].keys()
return column_names
def equalise_returns(self):
avg_return = self.get_average_return()
asset_names = self.keys()
for asset in asset_names:
self[asset] = _equalise_average_returns_for_df(self[asset], avg_return = avg_return)
def get_average_return(self) -> float:
column_names = self.get_column_names()
avg_return_by_column = [
_get_average_return_in_dict_for_column(self, column)
for column in column_names]
avg_return = np.nanmean(avg_return_by_column)
return avg_return
def adjust_returns_for_SR_costs(self,
dict_of_SR_costs: dictOfSR) -> 'dictOfReturnsForOptimisation':
net_returns_dict = dict([(asset_name,
_adjust_df_for_SR_costs(self[asset_name], dict_of_SR_costs)
)
for asset_name in self.keys()
])
net_returns_dict = dictOfReturnsForOptimisation(net_returns_dict)
return net_returns_dict
def single_resampled_set_of_returns(self, frequency: str) -> returnsForOptimisation:
returns_as_list = listOfDataFrames(self.values())
pooled_length = len(returns_as_list)
returns_as_list_downsampled = returns_as_list.resample_sum(frequency)
returns_as_list_common_ts = returns_as_list_downsampled.reindex_to_common_index()
returns_for_optimisation = returns_as_list_common_ts.stacked_df_with_added_time_from_list()
returns_for_optimisation = returnsForOptimisation(returns_for_optimisation,
frequency = frequency,
pooled_length=pooled_length)
return returns_for_optimisation
def _adjust_df_for_SR_costs(gross_returns: pd.DataFrame,
dict_of_SR_costs: dictOfSR):
net_returns_as_dict= dict([(
column_name, _adjust_df_column_for_SR_costs(gross_returns,
dict_of_SR_costs,
column_name))
for column_name in gross_returns.columns])
net_returns_as_df = pd.DataFrame(net_returns_as_dict, index = gross_returns.index)
return net_returns_as_df
def _adjust_df_column_for_SR_costs(gross_returns: pd.DataFrame,
dict_of_SR_costs: dictOfSR,
column_name:str):
# Returns always business days
daily_gross_returns_for_column = gross_returns[column_name]
daily_gross_return_std = daily_gross_returns_for_column.std()
daily_SR_cost = dict_of_SR_costs[column_name] / ROOT_BDAYS_INYEAR
daily_returns_cost = daily_SR_cost * daily_gross_return_std
daily_returns_cost_as_list = [daily_returns_cost] * len(gross_returns.index)
daily_returns_cost_as_ts = pd.Series(daily_returns_cost_as_list, index = gross_returns.index)
net_returns = daily_gross_returns_for_column + daily_returns_cost_as_ts
return net_returns
def _get_average_return_in_dict_for_column(returns_dict: dictOfReturnsForOptimisation,
column: str) -> float:
## all daily data so can take an average
series_of_returns = [returns_series[column].values
for returns_series in returns_dict.values()]
all_returns = flatten_list(series_of_returns)
return np.nanmean(all_returns)
def _equalise_average_returns_for_df(return_df: pd.DataFrame,
avg_return: float = 0.0) -> pd.DataFrame:
# preserve 'noise' so standard deviation constant
return_df = return_df.apply(_equalise_average_returns_for_df_column,
axis=0, avg_return=avg_return)
return return_df
def _equalise_average_returns_for_df_column(return_data:pd.Series,
avg_return: float = 0.0) -> pd.Series:
current_mean= np.nanmean(return_data.values)
mean_adjustment = avg_return - current_mean
new_data = return_data + mean_adjustment
return new_data
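# A minimal sketch (not part of the original module; the example function and
# its numbers are illustrative only) of the property noted above: shifting a
# return series to a target mean leaves its standard deviation unchanged.
def _example_equalise_preserves_std():
    returns = pd.Series([0.01, -0.02, 0.03, 0.00])
    equalised = _equalise_average_returns_for_df_column(returns, avg_return=0.0)
    assert abs(np.nanmean(equalised.values)) < 1e-9      # mean moved to ~0
    assert abs(equalised.std() - returns.std()) < 1e-9   # 'noise' preserved
    return equalised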
class dictOfReturnsForOptimisationWithCosts(dict):
def __init__(self, dict_of_returns):
dict_of_returns = _turn_singular_account_curve_into_dict(dict_of_returns)
super().__init__(dict_of_returns)
def get_returns_for_asset_as_single_dict(self, asset_name, type: str="gross")-> dictOfReturnsForOptimisation:
returns_for_asset = self[asset_name]
typed_returns = getattr(returns_for_asset, type)
new_returns_dict = {SINGLE_NAME: typed_returns}
new_returns_dict = dictOfReturnsForOptimisation(new_returns_dict)
return new_returns_dict
def get_returns_for_all_assets(self, type: str="gross") -> dictOfReturnsForOptimisation:
gross_returns_dict= dict(
[(code, getattr(self[code], type))
for code in self.keys()]
)
gross_returns_dict = dictOfReturnsForOptimisation(gross_returns_dict)
return gross_returns_dict
def dict_of_SR(self, type: str) -> dictOfSRacrossAssets:
dict_of_SR = dict([(code,
returns_for_optimisation.get_annual_SR_dict(type))
for code, returns_for_optimisation in self.items()
])
dict_of_SR = dictOfSRacrossAssets(dict_of_SR)
return dict_of_SR
def get_annual_SR_dict_for_asset(self, asset_name: str, type: str = "gross") -> dictOfSR:
returns_this_asset = self[asset_name]
SR_dict = returns_this_asset.get_annual_SR_dict(type)
return SR_dict
class returnsForOptimisationWithCosts(object):
def __init__(self, account_curve_group: accountCurveGroup):
self._from_account_curve_group_to_returns_for_optimisation(account_curve_group)
def _from_account_curve_group_to_returns_for_optimisation(self,
account_curve_group: accountCurveGroup):
for type in ['gross', 'costs']:
account_curve = getattr(account_curve_group, type).to_frame()
account_curve = account_curve.resample("1B").sum()
# avoid understating vol
account_curve[account_curve==0.0] = np.nan
setattr(self, type, account_curve)
def get_annual_SR_dict(self, type="gross") -> dictOfSR:
relevant_curve = getattr(self, type)
list_of_columns = list(relevant_curve.columns)
SR_dict = dict([
(column_name,
_get_annual_SR_for_returns_for_optimisation(
self, column_name, type=type)
) for column_name in list_of_columns])
SR_dict = dictOfSR(SR_dict)
return SR_dict
def _turn_singular_account_curve_into_dict(dict_of_returns) -> dict:
if _singular_account_curve(dict_of_returns):
return {SINGLE_NAME: dict_of_returns}
else:
return dict_of_returns
def _singular_account_curve(dict_of_returns) -> bool:
if type(dict_of_returns) is not dict:
return True
else:
return False
def _get_annual_SR_for_returns_for_optimisation(returns_for_optimisation: returnsForOptimisationWithCosts,
column_name: str,
type: str="gross") -> float:
gross_curve = returns_for_optimisation.gross[column_name]
if type=="gross":
daily_return = gross_curve.mean()
elif type=="costs":
cost_curve = returns_for_optimisation.costs[column_name]
daily_return = cost_curve.mean()
else:
raise Exception("Unknown returns type '%s': expected 'gross' or 'costs'" % type)
daily_std = gross_curve.std()
return annual_SR_from_daily_returns(daily_return, daily_std)
def annual_SR_from_daily_returns(daily_return, daily_std):
return ROOT_BDAYS_INYEAR * daily_return/daily_std
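# A minimal usage sketch (not part of the original module; the numbers are
# illustrative and ROOT_BDAYS_INYEAR is assumed to be sqrt(256) = 16): a daily
# mean return of 0.0005 with a daily standard deviation of 0.01 gives a daily
# SR of 0.05, i.e. an annualised SR of roughly 16 * 0.05 = 0.8.
def _example_annual_SR():
    return annual_SR_from_daily_returns(daily_return=0.0005, daily_std=0.01)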
|
gpl-3.0
|
pianomania/scikit-learn
|
examples/preprocessing/plot_robust_scaling.py
|
85
|
2698
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
|
bsd-3-clause
|
vibhorag/scikit-learn
|
examples/exercises/plot_cv_diabetes.py
|
231
|
2527
|
"""
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
|
bsd-3-clause
|
bryan-lunt/py_pointcloud_reader
|
MeshEditor.py
|
1
|
1555
|
# coding: utf-8
import sys
sys.path.append("./src/")
from pypointcloud import *
import scipy as S
results = None
with open("./D_mel_wt__atlas_r2.vpc") as infile:
results = read_vpc(infile)
d = results[1]
x = d[:,0]
y = d[:,1]
z = d[:,2]
colnum = results[0]["column"].index("eve__3")-1
colors = S.vstack([d[:,colnum],S.zeros(d.shape[0]),S.zeros(d.shape[0])]).T
colors -= colors.min()
colors*=S.power(colors.max(),-1.0)
ap_line = None
x_min = x.min()
x_max = x.max()
z_min = z.min()
z_max = z.max()
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.widgets import Button
from EditableSpline import *
N_SPLINES = 9
points_per_spline = 5
cutsplines = list()
try:
mesh_x = S.loadtxt("mesh_x.txt")
mesh_y = S.loadtxt("mesh_y.txt")
for i in range(mesh_x.shape[0]):
cutsplines.append(S.vstack([mesh_x[i],mesh_y[i]]).T)
except Exception:  # no saved mesh files: fall back to evenly spaced vertical splines
x_pos = S.linspace(x_min,x_max,N_SPLINES)
cutsplines = list()
for i in range(N_SPLINES):
cutsplines.append(S.vstack([S.ones(points_per_spline)*x_pos[i],S.linspace(z_min,z_max,points_per_spline)]).T)
editable_spline_list = list()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.scatter(x,z,s=45.0,c=colors,alpha=0.5)
multi = MultiSpline(ax,cutsplines)
multi.connect()
multi.update_line()
plt.show()
mesh_x = S.array([i.data[:,0] for i in multi.splines])
mesh_y = S.array([i.data[:,1] for i in multi.splines])
S.savetxt("mesh_x.txt",mesh_x)
S.savetxt("mesh_y.txt",mesh_y)
S.savetxt("all_mesh.txt",S.array(multi.datas))
|
gpl-3.0
|
tjhei/burnman
|
examples/example_seismic.py
|
5
|
6354
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_seismic
---------------
Shows the various ways to input seismic models (:math:`V_s, V_p, V_{\phi}, \\rho`) as a
function of depth (or pressure) as well as different velocity model libraries
available within Burnman:
1. PREM :cite:`dziewonski1981`
2. STW105 :cite:`kustowski2008`
3. AK135 :cite:`kennett1995`
4. IASP91 :cite:`kennett1991`
This example will first calculate or read in a seismic model and plot the
model along the defined pressure range. The example also illustrates how to import a seismic model of your choice, here shown by importing AK135 :cite:`kennett1995`.
*Uses:*
* :doc:`seismic`
*Demonstrates:*
* Utilization of library seismic models within BurnMan
* Input of user-defined seismic models
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
if __name__ == "__main__":
# List of seismic 1D models
models = [burnman.seismic.PREM(), burnman.seismic.STW105(),
burnman.seismic.AK135(), burnman.seismic.IASP91()]
colors = ['r', 'b', 'm', 'k']
# Variables to plot
variables = ['pressure', 'gravity', 'v_p', 'v_s', 'v_phi', 'density']
units = ['Pa', 'm/s^2', 'm/s', 'm/s', 'm/s', 'kg/m^3', 'Pa', 'm/s^2']
plt.figure(figsize=(10, 9))
# Run through models and variables
for variable_index in range(len(variables)):
ax = plt.subplot(3, 2, variable_index + 1)
for model_index in range(len(models)):
# specify where we want to evaluate, here we map from pressure to depth
# 1. format p = np.arange (starting pressure, ending pressure, pressure step) (in Pa)
# p = np.arange(1.0e9,360.0e9,1.e9)
# depths = np.array([models[model_index].depth(pr) for pr in p])
# 2. we could also just specify some depth levels directly like this
# depths = np.arange(700e3,2800e3,100e3)
# 3. we could also use the data points where the seismic model is specified over a depth range, this will bring out any discontinuities
# this is the preferred way to plot seismic discontinuities
# correctly
depths = models[model_index].internal_depth_list(
mindepth=0, maxdepth=6371e3)
# now evaluate everything at the given depths levels (using linear interpolation)
# try to get and plot values for given model, if this fails the
# variable is likely not defined for that model
try:
values = getattr(models[model_index], variables[variable_index])(depths)
plt.plot(depths / 1.e3, values, color=colors[
model_index], linestyle='-', label=models[model_index].__class__.__name__)
except:
# write out warning that the variable failed for given
# model
print(
variables[variable_index] + ' is not defined for ' + models[model_index].__class__.__name__)
plt.title(variables[variable_index])
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if variable_index == 3:
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
if variable_index > 3:
plt.xlabel('depth in km')
plt.ylabel(units[variable_index])
plt.gca().set_xticks([660, 2891, 5150])
plt.show()
# Alternatively one is able to evaluate all the variables for a model in a
# single line
pressure, gravity, v_p, v_s, v_phi, density = models[0].evaluate(
['pressure', 'gravity', 'v_p', 'v_s', 'v_phi', 'density'], models[0].internal_depth_list(mindepth=-1.e3, maxdepth=6372.1e3))
# The following shows how to read in your own model from a file
# Model needs to be defined with increasing depth and decreasing radius.
# In this case the table is switched.
class ak135_table(burnman.seismic.SeismicTable):
def __init__(self):
burnman.seismic.SeismicTable.__init__(self)
# In format: radius, pressure, density, v_p, v_s
table = burnman.tools.read_table(
"input_seismic/ak135_lowermantle.txt")
table = np.array(table)
self.table_radius = table[:, 0][::-1]
self.table_pressure = table[:, 1][::-1]
self.table_density = table[:, 2][::-1]
self.table_vp = table[:, 3][::-1]
self.table_vs = table[:, 4][::-1]
# self.table_depth needs to be defined and needs to be increasing
self.table_depth = self.earth_radius - self.table_radius
ak = ak135_table()
# specify where we want to evaluate, here we map from pressure to depth
depths = np.linspace(700e3, 2800e3, 40)
# now evaluate everything at the given depths levels (using interpolation)
pressures, density, v_p, v_s, v_phi = ak.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
# plot vs and vp and v_phi (note that v_phi is computed!)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.title('ak135')
plt.plot(depths / 1.e3, v_p / 1.e3, '+-r', label='v_p')
plt.plot(depths / 1.e3, v_s / 1.e3, '+-b', label='v_s')
plt.plot(depths / 1.e3, v_phi / 1.e3, '--g', label='v_phi')
plt.legend(loc='lower left')
plt.xlabel('depth in km')
plt.ylabel('km/s')
# plot pressure,density vs depth from prem:
plt.subplot(1, 2, 2)
plt.title('ak135')
plt.plot(depths / 1.e3, pressures / 1.e9, '-r', label='pressure')
plt.ylabel('GPa')
plt.xlabel('depth in km')
plt.legend(loc='upper left')
plt.twinx()
plt.ylabel('g/cc')
plt.plot(depths / 1.e3, density / 1.e3, '-b', label='density')
plt.legend(loc='lower right')
plt.show()
|
gpl-2.0
|
edhuckle/statsmodels
|
statsmodels/sandbox/tools/try_mctools.py
|
34
|
1944
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 15:20:45 2011
@author: josef
"""
from statsmodels.compat.python import lrange
import numpy as np
from scipy import stats
from statsmodels.sandbox.tools.mctools import StatTestMC
from statsmodels.sandbox.stats.diagnostic import (
acorr_ljungbox, unitroot_adf)
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=lrange(4))
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print('\n\n')
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1])
#----------------------
def randwalksim(nobs=500, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def adf20(x):
return unitroot_adf(x, 2, trendorder=0, autolag=None)
print(adf20(np.random.randn(100)))
mc2 = StatTestMC(randwalksim, adf20)
mc2.run(10000, statindices=[0,1])
frac = [0.01, 0.05, 0.1]
#bug
crit = np.array([-3.4996365338407074, -2.8918307730370025, -2.5829283377617176])[:,None]
print(mc2.summary_cdf([0], frac, crit,
varnames=['adf'],
title='adf'))
#bug
#crit2 = np.column_stack((crit, frac))
#print mc2.summary_cdf([0, 1], frac, crit,
# varnames=['adf'],
# title='adf')
print(mc2.quantiles([0]))
print(mc2.cdf(crit, [0]))
doplot=1
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist([3],stats.chi2([4]).pdf)
plt.title('acorr_ljungbox - MC versus chi2')
plt.show()
|
bsd-3-clause
|
Jozhogg/iris
|
docs/iris/example_code/General/projections_and_annotations.py
|
6
|
5249
|
"""
Plotting in different projections
=================================
This example shows how to overlay data and graphics in different projections,
demonstrating various features of Iris, Cartopy and matplotlib.
We wish to overlay two datasets, defined on different rotated-pole grids.
To display both together, we make a pseudocoloured plot of the first, overlaid
with contour lines from the second.
We also add some lines and text annotations drawn in various projections.
We plot these over a specified region, in two different map projections.
"""
import cartopy.crs as ccrs
import iris
import iris.plot as iplt
import numpy as np
import matplotlib.pyplot as plt
# Define a Cartopy 'ordinary' lat-lon coordinate reference system.
crs_latlon = ccrs.PlateCarree()
def make_plot(projection_name, projection_crs):
# Create a matplotlib Figure.
fig = plt.figure()
# Add a matplotlib Axes, specifying the required display projection.
# NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the
# resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting
# in different coordinate systems.
ax = plt.axes(projection=projection_crs)
# Set display limits to include a set region of latitude * longitude.
# (Note: Cartopy-specific).
ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon)
# Add coastlines and meridians/parallels (Cartopy-specific).
ax.coastlines(linewidth=0.75, color='navy')
ax.gridlines(crs=crs_latlon, linestyle='-')
# Plot the first dataset as a pseudocolour filled plot.
maindata_filepath = iris.sample_data_path('rotated_pole.nc')
main_data = iris.load_cube(maindata_filepath)
# NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate
# system with the 'transform' keyword: This enables the Axes (a cartopy
# GeoAxes) to reproject the plot into the display projection.
iplt.pcolormesh(main_data, cmap='RdBu_r')
# Overplot the other dataset (which has a different grid), as contours.
overlay_filepath = iris.sample_data_path('space_weather.nc')
overlay_data = iris.load_cube(overlay_filepath, 'total electron content')
# NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a
# 'transform' keyword, enabling Cartopy reprojection.
iplt.contour(overlay_data, 20,
linewidths=2.0, colors='darkgreen', linestyles='-')
# Draw a margin line, some way in from the border of the 'main' data...
# First calculate rectangle corners, 7% in from each corner of the data.
x_coord, y_coord = main_data.coord(axis='x'), main_data.coord(axis='y')
x_start, x_end = np.min(x_coord.points), np.max(x_coord.points)
y_start, y_end = np.min(y_coord.points), np.max(y_coord.points)
margin = 0.07
margin_fractions = np.array([margin, 1.0 - margin])
x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions
y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions
box_x_points = x_lower + (x_upper - x_lower) * np.array([0, 1, 1, 0, 0])
box_y_points = y_lower + (y_upper - y_lower) * np.array([0, 0, 1, 1, 0])
# Get the Iris coordinate system of the X coordinate (Y should be the same).
cs_data1 = x_coord.coord_system
# Construct an equivalent Cartopy coordinate reference system ("crs").
crs_data1 = cs_data1.as_cartopy_crs()
# Draw the rectangle in this crs, with matplotlib "pyplot.plot".
# NOTE: the 'transform' keyword specifies a non-display coordinate system
# for the plot points (as used by the "iris.plot" functions).
plt.plot(box_x_points, box_y_points, transform=crs_data1,
linewidth=2.0, color='white', linestyle='--')
# Mark some particular places with a small circle and a name label...
# Define some test points with latitude and longitude coordinates.
city_data = [('London', 51.5072, 0.1275),
('Halifax, NS', 44.67, -63.61),
('Reykjavik', 64.1333, -21.9333)]
# Place a single marker point and a text annotation at each place.
for name, lat, lon in city_data:
plt.plot(lon, lat, marker='o', markersize=7.0, markeredgewidth=2.5,
markerfacecolor='black', markeredgecolor='white',
transform=crs_latlon)
# NOTE: the "plt.annotate call" does not have a "transform=" keyword,
# so for this one we transform the coordinates with a Cartopy call.
at_x, at_y = ax.projection.transform_point(lon, lat,
src_crs=crs_latlon)
plt.annotate(
name, xy=(at_x, at_y), xytext=(30, 20), textcoords='offset points',
color='black', backgroundcolor='white', size='large',
arrowprops=dict(arrowstyle='->', color='white', linewidth=2.5))
# Add a title, and display.
plt.title('A pseudocolour plot on the {} projection,\n'
'with overlaid contours.'.format(projection_name))
iplt.show()
def main():
# Demonstrate with two different display projections.
make_plot('Equidistant Cylindrical', ccrs.PlateCarree())
make_plot('North Polar Stereographic', ccrs.NorthPolarStereo())
if __name__ == '__main__':
main()
|
lgpl-3.0
|
DiamondLightSource/FTIR-Filtering-Experimental
|
2 Class FFT (filter and no filter)/src/excelFiles.py
|
1
|
3481
|
'''
Created on 9 Jul 2015
@author: flb41892
'''
from myClasses import fft
from myClasses import fftfilter
import h5py
import pylab
import matplotlib.pyplot as plt
import pandas as pd
#############################################
#USER CODE BELOW
#VARIABLES
xl = pd.ExcelFile('/home/flb41892/data/ExcelFringes/' + '1.xlsx')
data = xl.parse('Sheet1', index_col = None, na_values = {'NA'})
data1 = xl.parse('a0', index_col = None, na_values = {'NA'})
interi = data.as_matrix()
interf = data1.as_matrix()
ind = interi[:,0]
initial = interi[:,1]
final =interf[:,1]
ft = fft()
ft2 = fftfilter()
f = h5py.File("/home/flb41892/data/fringe/Spectrum3.nxs","r") #file to analyse
#s = f["entry1/instrument/interferometer/sample_interferogram_changed_scan"][...] #signal on which to perform FFT and find the single channel spectrum
s = initial
res = f['/entry1/instrument/interferometer/opus_parameters/acquisition_changed/resolution'][...] #This extracts the resolution of your scan.
highfold = f['/entry1/instrument/interferometer/opus_parameters/instrument_changed/high_folding_limit'][...]
# com = f["entry1/instrument/interferometer/sample_changed_scan"][...]# this is the FT of the same file, as performed by Opus commercial software
refer = f['/entry1/instrument/interferometer/reference_changed_scan'][...] #reference scan
renergy = f['/entry1/instrument/interferometer/reference_changed_energy'][...] # energy axis of reference scan
fw = 1100  # defines the filter width. The filter is composed of a pair of symmetric, inverse Blackman-Harris 3 (-67 dB side lobe) filters, used to eliminate secondary fringes
#The filters are zero filled in the middle to sweep out a variable region of the spectrum. Select number of points you want to eliminate with the 'fw' parameter
fmin = 0.0  # the value of this parameter determines the minimum value of the filter, e.g. if fmin = 0.0, some of the points will be completely eliminated; if fmin > 0, points will only be dampened.
dv = 27.5 # dv = half of period of oscillatory fringes in absorbance spectrum/intensity spectrum, along the energy axis.
# NB. Needs to be in units of cm-1!
#When you input dv, the program will use that information to position the inverse Blackman Harris filter to eliminate
#the oscillations in your spectrum.
#######################################
#COMMANDS
#Non oscillatory data
#command that outputs 2 arrays, 1st array is the wavenumber axis [cm-1] and the second array is the single channel spectrum
schannel = ft.singleChannel(s,highfold) #use this function if you have a single, raw interferogram
schannel2 = ft.singleChannel2(s,highfold) #use this function if you have a double sided interferogram
#NB. the high folding limit must be in cm-1
#absorb = ft.absorbance(schannel2, refer, renergy)
#########################################
#Oscillatory data
schanneloscil = ft2.singleChannel(s, fw, fmin,highfold,dv) #use this function if you have a single, oscillatory, raw interferogram
#
schannel2oscil = ft2.singleChannel2(s, fw, fmin,highfold,dv) #use this function if you have a double sided, oscillatory, interferogram
#NB. the high folding limit must be in cm-1
absorboscil = ft2.absorbance(schanneloscil, refer, renergy)
# example plotting tool below
pylab.plot(schannel2oscil[1],schannel2oscil[0],'g')
pylab.plot(schannel2[1],schannel2[0],'r')
pylab.xlabel("wavenumber [cm-1] ", fontsize = 17 )
pylab.ylabel("Intensity Spectrum [AU]", fontsize = 17)
plt.show()
|
apache-2.0
|
RUBi-ZA/MD-TASK
|
delta_networks.py
|
1
|
5111
|
#!/usr/bin/env python
#
# Compare network measurements such as BC and L by plotting a wild-type vs mutants heatmap
#
# Script distributed under GNU GPL 3.0
#
# Author: David Brown
# Date: 17-11-2016
from natsort import natsorted
import numpy as np
from lib.cli import CLI
from lib.utils import Logger
import os, sys, argparse, matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot(num_plots, plot_num, data, data_std, initial_x, title, x_label, y_label):
y_ticks = data.shape[0]
num_nodes = data.shape[1]
plt.subplot(num_plots * 2, 1, plot_num * 2 - 1)
plt.imshow(data, cmap='hot', interpolation='nearest', extent=[initial_x, initial_x + num_nodes, y_ticks, 1])
plt.title("%s" % title, fontsize=18)
plt.xlabel(x_label, fontsize=16)
plt.ylabel(y_label, fontsize=16)
plt.colorbar()
plt.subplot(num_plots * 2, 1, plot_num * 2)
plt.imshow(data_std, cmap='hot', interpolation='nearest', extent=[initial_x, initial_x + num_nodes, y_ticks, 1])
plt.title("%s (Std Dev)" % title, fontsize=18)
plt.xlabel(x_label, fontsize=16)
plt.ylabel(y_label, fontsize=16)
plt.colorbar()
def main(args):
reference = np.loadtxt(args.reference)
reference_std = np.loadtxt(args.reference_std)
alternatives = natsorted(args.alternatives)
alternatives_std = natsorted(args.alternatives_std)
if len(alternatives) != len(alternatives_std):
log.error("The number of files supplied to the --alternatives argument differs from the number supplied to --alternatives-std")
sys.exit(1)
if len(alternatives) < 2:
log.error("At least 2 files must be supplied to the alternatives argument")
sys.exit(1)
num_nodes = reference.shape[0]
y_ticks = []
y_data = np.zeros((len(alternatives), num_nodes))
y_data_std = np.zeros((len(alternatives), num_nodes))
for i, a in enumerate(alternatives):
alternative = np.loadtxt(a)
alternative_std = np.loadtxt(alternatives_std[i])
alt_nodes = alternative.shape[0]
if alt_nodes != num_nodes:
num_nodes = min(alt_nodes, num_nodes)
log.info("Trimming data to %d nodes per network" % num_nodes)
y_data = y_data[:,:num_nodes]
y_data_std = y_data_std[:,:num_nodes]
reference = reference[:num_nodes]
alternative = alternative[:num_nodes]
reference_std = reference_std[:num_nodes]
alternative_std = alternative[:num_nodes]
difference = alternative - reference
difference_std = alternative_std - reference_std
if args.absolute:
difference = np.absolute(difference)
difference_std = np.absolute(difference_std)
y_data[i,:] = difference
y_data_std[i,:] = difference_std
y_ticks.append(".".join(os.path.basename(a).split(".")[:-1]))
log.info("Plotting heat map: %s.png\n" % args.prefix)
if args.split_pos:
plt.subplots(figsize=(30, 16))
plot(2, 1, y_data[:,:args.split_pos], y_data_std[:,:args.split_pos], args.initial_x_1, args.title_1, args.x_label, args.y_label)
plot(2, 2, y_data[:,args.split_pos:], y_data_std[:,args.split_pos:], args.initial_x_2, args.title_2, args.x_label, args.y_label)
else:
plt.subplots(figsize=(30, 3))
plot(1, 1, y_data, y_data_std, args.initial_x, args.title, args.x_label, args.y_label)
plt.savefig("%s.png" % args.prefix, dpi=300, bbox_inches='tight')
plt.close()
log = Logger()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--reference", help="The reference network (.dat)")
parser.add_argument("--reference-std", help="The reference standard deviation network (.dat) - should be in identical order as alternative networks")
parser.add_argument("--alternatives", help="The alternative networks (.dat)", nargs="*")
parser.add_argument("--alternatives-std", help="The alternative standard deviation networks (.dat) - should be in identical order as alternative networks", nargs="*", default=None)
parser.add_argument("--title", help="Plot title")
parser.add_argument("--x-label", help="Label for x-axis")
parser.add_argument("--y-label", help="Label for y-axis")
parser.add_argument("--initial-x", type=int, help="Start value for x-axis", default=1)
parser.add_argument("--split-pos", type=int, help="Position to split the network at (for large networks)", default=None)
parser.add_argument("--initial-x-1", type=int, help="Start value for x-axis on first graph", default=1)
parser.add_argument("--initial-x-2", type=int, help="Start value for x-axis on second graph", default=1)
parser.add_argument("--title-1", help="Title for first graph")
parser.add_argument("--title-2", help="Title for second graph")
parser.add_argument("--absolute", help="Set this flag to use absolute values in the heat map", action='store_true', default=False)
parser.add_argument("--prefix", help="Prefix for output file", default="output")
CLI(parser, main, log)
|
gpl-3.0
|
vanpact/scipy
|
scipy/signal/fir_filter_design.py
|
40
|
20637
|
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
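# A small illustration of the convention above (not part of the original
# module; the helper and its numbers are only an example): with a 4 kHz
# sampling rate the Nyquist rate is 2 kHz, so an absolute transition width of
# 300 Hz corresponds to the relative value width = 300 / 2000 = 0.15.
def _example_relative_width(width_hz=300.0, sample_rate_hz=4000.0):
    nyquist_hz = sample_rate_hz / 2.0
    return width_hz / nyquist_hz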
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
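# Worked number for the formula above (not in the original source): an
# attenuation of a = 65 dB falls in the first branch, giving
# beta = 0.1102 * (65 - 8.7) ~= 6.204.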
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
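# A minimal design sketch combining the helpers above (not part of the original
# module; the specification values are illustrative only): derive the window
# from a ripple/width specification, then pass it on to firwin().
def _example_kaiser_lowpass(ripple_db=65.0, width=0.05, cutoff=0.3):
    numtaps, beta = kaiserord(ripple_db, width)
    return firwin(numtaps, cutoff, window=('kaiser', beta))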
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
The magnitude response of all but type I filters is subject to the following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/multiline.py
|
12
|
1224
|
#!/usr/bin/env python
from pylab import *
#from matplotlib.pyplot import *
#from numpy import arange
if 1:
figure(figsize=(7, 4))
ax = subplot(121)
ax.set_aspect(1)
plot(arange(10))
xlabel('this is a xlabel\n(with newlines!)')
ylabel('this is vertical\ntest', multialignment='center')
#ylabel('this is another!')
text(2, 7,'this is\nyet another test',
rotation=45,
horizontalalignment = 'center',
verticalalignment = 'top',
multialignment = 'center')
grid(True)
subplot(122)
text(0.29, 0.7, "Mat\nTTp\n123", size=18,
va="baseline", ha="right", multialignment="left",
bbox=dict(fc="none"))
text(0.34, 0.7, "Mag\nTTT\n123", size=18,
va="baseline", ha="left", multialignment="left",
bbox=dict(fc="none"))
text(0.95, 0.7, "Mag\nTTT$^{A^A}$\n123", size=18,
va="baseline", ha="right", multialignment="left",
bbox=dict(fc="none"))
xticks([0.2, 0.4, 0.6, 0.8, 1.],
["Jan\n2009","Feb\n2009","Mar\n2009", "Apr\n2009", "May\n2009"])
axhline(0.7)
title("test line spacing for multiline text")
subplots_adjust(bottom=0.25, top=0.8)
draw()
show()
|
mit
|
dsavoiu/kafe2
|
examples/006_advanced_errors/04_shockley.py
|
1
|
4370
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
Fit of Shockley equation to I-U characteristic of a diode
=========================================================
This is a practical example with a non-trivial covariance matrix
with independent and correlated relative uncertainties in the x-
and y-direction. *kafe2* supports constructing the full covariance
matrix with the method
``add_error(err_val=?, axis=?, correlation=0, relative=False, reference='data')``,
which allows the user to specify the components of the uncertainties
one after the other. The resulting individual covariance matrices
are all added to form the full covariance matrix used in the fit.
Here, we take as an example a typical digital ammeter or voltmeter.
Device characteristics are specified as 4000 Counts, +/-(0.5% + 3 digits),
where the calibration uncertainty is correlated among all measurements,
while the digitisation uncertainties are independent. There often also
is an additional, independent noise component.
The code in this example shows how these uncertainty components for
a set of voltage and current measurements with a diode are specified.
Most of the code is needed to specify the uncertainties, the fit
of the Shockley equation and the output of the results is very similar
to the other examples discussed already.
"""
from kafe2 import Fit, Plot, XYContainer
import numpy as np
import matplotlib.pyplot as plt
# model function to fit
def Shockley(U, I_s=0.5, U0=0.03):
"""Parametrisation of a diode characteristic
U0 should be limited such that U/U0<150 to avoid
exceeding the 64 bit floating point range
Args:
- U: Voltage (V)
- I_s: reverse saturation current (nA)
- U0: thermal voltage (V) * emission coefficient
Returns:
- float I: diode current (mA)
"""
return 1E-6 * I_s * np.exp((U / U0) - 1.)
# measurements:
# voltmeter characteristics:
# - voltage, measurement range 2V
voltages = [0.450, 0.470, 0.490, 0.510, 0.530,
0.550, 0.560, 0.570, 0.580, 0.590, 0.600, 0.610, 0.620, 0.630,
0.640, 0.645, 0.650, 0.655, 0.660, 0.665]
# - current: 2 measurements in range 200µA, 12 in range 20mA and 6 in range 200mA
currents = [0.056, 0.198,
0.284, 0.404, 0.739, 1.739, 1.962, 2.849, 3.265, 5.706, 6.474, 7.866, 11.44, 18.98,
23.35, 27.96, 38.61, 46.73, 49.78, 57.75]
# create a data container
diode_data = XYContainer(x_data=voltages, y_data=currents)
diode_data.label = 'I vs. U'
diode_data.axis_labels = ['Voltage (V)', 'Current (mA)']
# --- calculate uncertainty components
# - precision voltmeter: 4000 Counts, +/-(0.5% + 3 digits)
# - range 2 V
crel_U = 0.005
Udigits = 3
Urange = 2
Ucounts = 4000
deltaU = Udigits * Urange / Ucounts
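# quick sanity check (not in the original example): 3 digits * 2 V / 4000 counts
# gives an absolute digitisation uncertainty of deltaU = 0.0015 V per reading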
# - noise contribution delta U = 0.005 V
deltaU_noise = 0.005
# add voltage uncertainties to data object
diode_data.add_error(axis='x', err_val=deltaU)
diode_data.add_error(axis='x', err_val=deltaU_noise)
# note: relative uncertainties w.r.t. model to be added to fit object later
# - precision ammeter: 2000 Counts, +/-(1.0% + 3 digits)
# - measurement ranges 200µA, 20mA und 200mA
crel_I = 0.010
Idigits = 3
Icounts = 2000
Irange1 = 0.2
Irange2 = 20
Irange3 = 200
deltaI = np.asarray(2 * [Idigits * Irange1 / Icounts] +
12 * [Idigits * Irange2 / Icounts] +
6 * [Idigits * Irange3 / Icounts])
# noise contribution delta I = 0.050 mA
deltaI_noise = 0.050
# add current uncertainties to data object
diode_data.add_error(axis='y', err_val=deltaI)
diode_data.add_error(axis='y', err_val=deltaI_noise)
# note: relative uncertainties w.r.t. model to be added to fit object
# --- start of fit
# create Fit object
ShockleyFit = Fit(diode_data, model_function=Shockley)
ShockleyFit.model_label = 'Shockley equation'
# add relative errors with reference to model
ShockleyFit.add_error(axis='x', err_val=crel_U, correlation=1.,
relative=True, reference='model')
ShockleyFit.add_error(axis='y', err_val=crel_I, correlation=1.,
relative=True, reference='model')
# to avoid overflow of 64 bit floats, limit U0
ShockleyFit.limit_parameter('U0', lower=0.005)
ShockleyFit.do_fit()
# create plot object
plotShockleyFit = Plot(ShockleyFit)
plotShockleyFit.plot(asymmetric_parameter_errors=True)
plt.show()
|
gpl-3.0
|
sbtlaarzc/vispy
|
vispy/ext/_bundled/mplutils.py
|
17
|
11507
|
"""
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
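# Illustrative usage (assumed, not from the original module):
#   color_to_hex('red')        -> '#FF0000'
#   color_to_hex((0, 0, 0, 0)) -> 'none'   # fully transparent colors map to 'none'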
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
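# For instance (illustrative), _many_to_one({('solid', '-'): 'none'}) returns
# {'solid': 'none', '-': 'none'}; LINESTYLES below uses this to map several
# matplotlib linestyle aliases onto a single SVG dash array.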
LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none',
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): None})
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq', None) is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
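# Sketch of assumed usage (not part of the original module): converting the
# unit rectangle yields the four corner vertices and the codes
# ['M', 'L', 'L', 'L', 'Z'] (the CLOSEPOLY vertex is dropped):
#   verts, codes = SVG_path(Path.unit_rectangle())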
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform() +
Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn": False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
try:
import pandas as pd
from pandas.tseries.converter import PeriodConverter
except ImportError:
pd = None
if (pd is not None and isinstance(axis.converter,
PeriodConverter)):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis[axname].get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
Returns an iterator over all children and nested children using
obj's get_children() method
if skipContainers is true, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
|
bsd-3-clause
|
wanggang3333/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
conversationai/conversationai-models
|
annotator_models/trainer/dawid_skene.py
|
1
|
22663
|
"""Description: Given unreliable ratings of items classes by multiple raters, determine the most likely true class for each item, class marginals, and individual error rates for each rater, using Expectation Maximization
References:
( Dawid and Skene (1979). Maximum Likelihood Estimation of Observer
Error-Rates Using the EM Algorithm. Journal of the Royal Statistical Society.
Series C (Applied Statistics), Vol. 28, No. 1, pp. 20-28.
"""
import argparse
import logging
import math
import sys
import time
import numpy as np
import pandas as pd
from scipy import stats
import tensorflow as tf
FLAGS = None
np.set_printoptions(precision=2)
def run(items,
raters,
classes,
counts,
label,
psuedo_count,
tol=1,
max_iter=25,
init='average'):
"""
Run the Dawid-Skene estimator on response data
Input:
responses: a pandas DataFrame of ratings where each row is a rating from
some rater ('_worker_id') on some item ('_unit_id')
tol: tolerance required for convergence of EM
max_iter: maximum number of iterations of EM
"""
# initialize
iteration = 0
converged = False
old_class_marginals = None
old_error_rates = None
# item_classes is a matrix of estimates of true item classes of size
# [items, classes]
item_classes = initialize(counts)
[nItems, nRaters, nClasses] = np.shape(counts)
logging.info('Iter\tlog-likelihood\tdelta-CM\tdelta-Y_hat')
while not converged:
iteration += 1
start_iter = time.time()
# M-step - updated error rates and class marginals given new
# distribution over true item classes
old_item_classes = item_classes
(class_marginals, error_rates) = m_step(counts, item_classes, psuedo_count)
# E-step - calculate expected item classes given error rates and
# class marginals
item_classes = e_step_verbose(counts, class_marginals, error_rates)
# check likelihood
log_L = calc_likelihood(counts, class_marginals, error_rates)
# calculate the number of seconds the last iteration took
iter_time = time.time() - start_iter
# check for convergence
if old_class_marginals is not None:
class_marginals_diff = np.sum(
np.abs(class_marginals - old_class_marginals))
item_class_diff = np.sum(np.abs(item_classes - old_item_classes))
logging.info('{0}\t{1:.1f}\t{2:.4f}\t\t{3:.2f}\t({4:3.2f} secs)'.format(
iteration, log_L, class_marginals_diff, item_class_diff, iter_time))
if (class_marginals_diff < tol and item_class_diff < tol) \
or iteration > max_iter:
converged = True
else:
logging.info('{0}\t{1:.1f}'.format(iteration, log_L))
# update current values
old_class_marginals = class_marginals
old_error_rates = error_rates
return class_marginals, error_rates, item_classes
def load_data(path, unit_id, worker_id, label):
logging.info('Loading data from {0}'.format(path))
with tf.gfile.Open(path, 'rb') as fileobj:
df = pd.read_csv(fileobj, encoding='utf-8')
# only keep necessary columns
df = df[[unit_id, worker_id, label]]
return df
def initialize(counts):
"""
Get initial estimates for the true item classes using counts
see equation 3.1 in Dawid-Skene (1979)
Input:
counts: counts of the number of times each response was given
by each rater for each item: [items x raters x classes]. Note
in the crowd rating example, counts will be a 0/1 matrix.
Returns:
item_classes: matrix of estimates of true item classes:
[items x responses]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
# sum over raters
response_sums = np.sum(counts, 1)
# create an empty array
item_classes = np.zeros([nItems, nClasses])
# for each item, take the average number of ratings in each class
for p in range(nItems):
item_classes[p, :] = response_sums[p, :] / np.sum(
response_sums[p, :], dtype=float)
return item_classes
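# Tiny worked example (illustrative only): if an item received two ratings of
# class 0 and one rating of class 1, its response_sums row is [2, 1] and its
# initial soft assignment becomes [2/3, 1/3].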
def m_step(counts, item_classes, psuedo_count):
"""
Get estimates for the prior class probabilities (p_j) and the error
rates (pi_jkl) using MLE with current estimates of true item classes
See equations 2.3 and 2.4 in Dawid-Skene (1979)
Input:
counts: Array of how many times each rating was given by each rater
for each item
item_classes: Matrix of current assignments of items to classes
psuedo_count: A psuedo count used to smooth the error rates. For each
rater k
and for each class i and class j, we pretend rater k has rated
psuedo_count examples with class i when class j was the true class.
Returns:
p_j: class marginals [classes]
pi_kjl: error rates - the probability of rater k giving
response l for an item in class j [observers, classes, classes]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
# compute class marginals
class_marginals = np.sum(item_classes, axis=0) / float(nItems)
# compute error rates for each rater, each predicted class
# and each true class
error_rates = np.matmul(counts.T, item_classes) + psuedo_count
  # reorder axes: np.matmul(counts.T, item_classes) has shape
  # [nClasses, nRaters, nClasses]; the einsum makes it [nRaters x nClasses x nClasses]
error_rates = np.einsum('abc->bca', error_rates)
# divide each row by the sum of the error rates over all observation classes
sum_over_responses = np.sum(error_rates, axis=2)[:, :, None]
# for cases where an annotator has never used a label, set their sum over
# responses for that label to 1 to avoid nan when we divide. The result will
# be error_rate[k, i, j] is 0 if annotator k never used label i.
sum_over_responses[sum_over_responses == 0] = 1
error_rates = np.divide(error_rates, sum_over_responses)
return (class_marginals, error_rates)
def m_step_verbose(counts, item_classes, psuedo_count):
"""
This method is the verbose (i.e. not vectorized) version of the m_step.
It is currently not used because the vectorized version is faster, but we
leave it here for future debugging.
Get estimates for the prior class probabilities (p_j) and the error
rates (pi_jkl) using MLE with current estimates of true item classes
See equations 2.3 and 2.4 in Dawid-Skene (1979)
Input:
counts: Array of how many times each rating was given by each rater
for each item
item_classes: Matrix of current assignments of items to classes
psuedo_count: A psuedo count used to smooth the error rates. For each
rater k
and for each class i and class j, we pretend rater k has rated
psuedo_count examples with class i when class j was the true class.
Returns:
p_j: class marginals [classes]
pi_kjl: error rates - the probability of rater k giving
response l for an item in class j [observers, classes, classes]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
# compute class marginals
class_marginals = np.sum(item_classes, 0) / float(nItems)
# compute error rates for each rater, each predicted class
# and each true class
error_rates = np.zeros([nRaters, nClasses, nClasses])
for k in range(nRaters):
for j in range(nClasses):
for l in range(nClasses):
error_rates[k, j, l] = np.dot(item_classes[:,j], counts[:,k,l]) \
+ psuedo_count
# normalize by summing over all observation classes
sum_over_responses = np.sum(error_rates[k, j, :])
if sum_over_responses > 0:
error_rates[k, j, :] = error_rates[k, j, :] / float(sum_over_responses)
return (class_marginals, error_rates)
def e_step(counts_tiled, class_marginals, error_rates):
"""
Determine the probability of each item belonging to each class,
given current ML estimates of the parameters from the M-step
See equation 2.5 in Dawid-Skene (1979)
Inputs:
counts_tiled: A matrix of how many times each rating was given
by each rater for each item, repeated for each class to make matrix
multiplication faster. Size: [nItems, nRaters, nClasses, nClasses]
class_marginals: probability of a random item belonging to each class.
Size: [nClasses]
error_rates: probability of rater k assigning a item in class j
to class l. Size [nRaters, nClasses, nClasses]
Returns:
item_classes: Soft assignments of items to classes
[items x classes]
"""
[nItems, _, nClasses, _] = np.shape(counts_tiled)
error_rates_tiled = np.tile(error_rates, (nItems, 1, 1, 1))
power = np.power(error_rates_tiled, counts_tiled)
# Note, multiplying over axis 1 and then 2 is substantially faster than
  # the equivalent np.prod(power, axis=(1, 3))
item_classes = class_marginals * np.prod(np.prod(power, axis=1), axis=2)
  # normalize the item class probabilities by dividing by the sum over all classes
item_sum = np.sum(item_classes, axis=1, keepdims=True)
item_classes = np.divide(item_classes, np.tile(item_sum, (1, nClasses)))
return item_classes
def e_step_verbose(counts, class_marginals, error_rates):
"""
This method is the verbose (i.e. not vectorized) version of
the e_step. It is actually faster than the vectorized e_step
function (16 seconds vs 25 seconds respectively on 10k ratings).
Determine the probability of each item belonging to each class,
given current ML estimates of the parameters from the M-step
See equation 2.5 in Dawid-Skene (1979)
Inputs:
counts: Array of how many times each rating was given
by each rater for each item
class_marginals: probability of a random item belonging to each class
error_rates: probability of rater k assigning a item in class j
to class l [raters, classes, classes]
Returns:
item_classes: Soft assignments of items to classes
[items x classes]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
item_classes = np.zeros([nItems, nClasses])
for i in range(nItems):
for j in range(nClasses):
estimate = class_marginals[j]
estimate *= np.prod(np.power(error_rates[:, j, :], counts[i, :, :]))
item_classes[i, j] = estimate
  # normalize the item class probabilities by dividing by the sum over all classes
item_sum = np.sum(item_classes, axis=1, keepdims=True)
item_classes = np.divide(item_classes, np.tile(item_sum, (1, nClasses)))
return item_classes
def calc_likelihood(counts, class_marginals, error_rates):
"""
Calculate the likelihood given the current parameter estimates
This should go up monotonically as EM proceeds
See equation 2.7 in Dawid-Skene (1979)
Inputs:
counts: Array of how many times each response was received
by each rater from each item
class_marginals: probability of a random item belonging to each class
error_rates: probability of rater k assigning a item in class j
to class l [raters, classes, classes]
Returns:
Likelihood given current parameter estimates
"""
[nItems, nRaters, nClasses] = np.shape(counts)
log_L = 0.0
for i in range(nItems):
item_likelihood = 0.0
for j in range(nClasses):
class_prior = class_marginals[j]
item_class_likelihood = np.prod(
np.power(error_rates[:, j, :], counts[i, :, :]))
item_class_posterior = class_prior * item_class_likelihood
item_likelihood += item_class_posterior
temp = log_L + np.log(item_likelihood)
if np.isnan(temp) or np.isinf(temp):
      logging.info('{0}, {1}, {2}, {3}'.format(i, log_L, np.log(item_likelihood),
temp))
sys.exit()
log_L = temp
return log_L
def random_initialization(counts):
"""
Similar to initialize() above, except choose one initial class for each
item, weighted in proportion to the counts.
Input:
counts: counts of the number of times each response was received
by each rater from each item: [items x raters x classes]
Returns:
item_classes: matrix of estimates of true item classes:
[items x responses]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
response_sums = np.sum(counts, 1)
# create an empty array
item_classes = np.zeros([nItems, nClasses])
# for each item, choose a random initial class, weighted in proportion
# to the counts from all raters
for p in range(nItems):
weights = response_sums[p, :] / np.sum(response_sums[p, :], dtype=float)
item_classes[p, np.random.choice(np.arange(nClasses), p=weights)] = 1
return item_classes
def majority_voting(counts):
"""
An alternative way to initialize assignment of items to classes
i.e Get initial estimates for the true item classes using majority voting
Input:
counts: Counts of the number of times each response was received
by each rater from each item: [items x raters x classes]
Returns:
item_classes: matrix of initial estimates of true item classes:
[items x responses]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
# sum over observers
response_sums = np.sum(counts, 1)
# create an empty array
item_classes = np.zeros([nItems, nClasses])
# take the most frequent class for each item
for p in range(nItems):
indices = np.argwhere(response_sums[p, :] == np.max(response_sums[p, :]))
# in the case of ties, take the lowest valued label (could be randomized)
item_classes[p, np.min(indices)] = 1
return item_classes
def parse_item_classes(df, label, item_classes, index_to_unit_id_map,
index_to_y_map, unit_id, worker_id, comment_text_path):
"""
Given the original data df, the predicted item_classes, and
the data mappings, returns a DataFrame with the fields:
* _unit_index: the 0,1,...nItems index
* _unit_id: the original item ID
* {LABEL}_hat: the predicted probability of the item being labeled 1 as
learned from the Dawid-Skene algorithm
* {LABEL}_mean: the mean of the original ratings
"""
LABEL_HAT = '{}_hat'.format(label)
LABEL_MEAN = '{}_mean'.format(label)
ROUND_DEC = 8
_, N_ClASSES = np.shape(item_classes)
df_predictions = pd.DataFrame()
# Add columns for predictions for each class
col_names = []
for k in range(N_ClASSES):
# y is the original value of the class. When we train, we re-map
# all the classes to 0,1,....K. But our data has classes like
    # -2,-1,0,1,2. In that case, if k is 0, then y would be -2
y = index_to_y_map[k]
col_name = '{0}_{1}'.format(LABEL_HAT, y)
col_names.append(col_name)
df_predictions[col_name] = [round(i[k], ROUND_DEC) for i in item_classes]
# To get a prediction of the mean label, multiply our predictions with the
# true y values.
y_values = list(index_to_y_map.values())
col_name = '{0}_hat_mean'.format(label)
df_predictions[col_name] = np.dot(df_predictions[col_names], list(y_values))
# Use the _unit_index to map to the original _unit_id
df_predictions['_unit_index'] = range(len(item_classes))
df_predictions[unit_id] = df_predictions['_unit_index']\
.apply(lambda i: index_to_unit_id_map[i])
# Calculate the y_mean from the original data and join on _unit_id
# Add a column for the mean predictions
df[label] = df[label].astype(float)
mean_labels = df.groupby(unit_id, as_index=False)[label]\
.mean()\
.round(ROUND_DEC)\
.rename(index=int, columns={label: LABEL_MEAN})
df_predictions = pd.merge(mean_labels, df_predictions, on=unit_id)
# join with data that contains the item-level comment text
if comment_text_path:
with tf.gfile.Open(comment_text_path, 'r') as fileobj:
logging.info(
'Loading comment text data from {}'.format(comment_text_path))
df_comments = pd.read_csv(fileobj)
# drop duplicate comments
df_comments = df_comments.drop_duplicates(subset=unit_id)
df_predictions = df_predictions.merge(df_comments, on=unit_id)
return df_predictions
def parse_error_rates(df, error_rates, index_to_worker_id_map, index_to_y_map,
unit_id, worker_id):
"""
Given the original data DataFrame, the predicted error_rates and the
mappings
between the indexes and ids, returns a DataFrame with the fields:
  * _worker_index: the 0,1,...nRaters index
  * _worker_id: the original worker ID
  * accuracy_rate_{k}: probability the worker would choose class k when
    the true class is k (for accurate workers, these numbers are high).
"""
columns = [worker_id, '_worker_index']
df_error_rates = pd.DataFrame()
# add the integer _worker_index
df_error_rates['_worker_index'] = index_to_worker_id_map.keys()
# add the original _worker_id
df_error_rates[worker_id] = [j for (i, j) in index_to_worker_id_map.items()]
# add annotation counts for each worker
worker_counts = df.groupby(
by=worker_id, as_index=False)[unit_id]\
.count()\
.rename(index=int, columns={unit_id: 'n_annotations'})
df_error_rates = pd.merge(df_error_rates, worker_counts, on=worker_id)
# add the diagonal error rates, which are the per-class accuracy rates,
# for each class k, we add a column for p(rater will pick k | item's true class is k)
# y_label is the original y value in the data and y_index is the
# integer we mapped it to, i.e. 0, 1, ..., |Y|
for y_index, y_label in index_to_y_map.items():
col_name = 'accuracy_rate_{0}'.format(y_label)
df_error_rates[col_name] = [e[y_index, y_index] for e in error_rates]
return df_error_rates
def main(FLAGS):
logging.basicConfig(level=logging.INFO)
# load data, each row is an annotation
n_examples = FLAGS.n_examples
label = FLAGS.label
unit_id = FLAGS.unit_id_col
worker_id = FLAGS.worker_id_col
comment_text_path = FLAGS.comment_text_path
df = load_data(FLAGS.data_path, unit_id, worker_id, label)[0:n_examples]
logging.info('Running on {0} examples for label {1}'.format(len(df), label))
# convert rater, item and label IDs to integers starting at 0
#
# * worker_id_to_index_map: _worker_id -> index
# * index_to_worker_id_map: index -> worker
# * unit_id_to_index_map: _unit_id -> index
# * index_to_unit_id_map: index -> _unit_id
# * y_to_index_map: label -> index
# * index_to_y_map: index -> label
worker_id_to_index_map = {
w: i for (i, w) in enumerate(df[worker_id].unique())
}
index_to_worker_id_map = {i: w for (w, i) in worker_id_to_index_map.items()}
unit_id_to_index_map = {w: i for (i, w) in enumerate(df[unit_id].unique())}
index_to_unit_id_map = {i: w for (w, i) in unit_id_to_index_map.items()}
y_to_index_map = {w: i for (i, w) in enumerate(df[label].unique())}
index_to_y_map = {i: w for (w, i) in y_to_index_map.items()}
# create list of unique raters, items and labels
raters = list(df[worker_id].apply(lambda x: worker_id_to_index_map[x]))
items = list(df[unit_id].apply(lambda x: unit_id_to_index_map[x]))
y = list(df[label].apply(lambda x: y_to_index_map[x]))
nClasses = len(df[label].unique())
nItems = len(df[unit_id].unique())
nRaters = len(df[worker_id].unique())
counts = np.zeros([nItems, nRaters, nClasses])
# convert responses to counts
for i, item_index in enumerate(items):
rater_index = raters[i]
y_index = y[i]
counts[item_index, rater_index, y_index] += 1
raters_unique = index_to_worker_id_map.keys()
items_unique = index_to_unit_id_map.keys()
classes_unique = index_to_y_map.keys()
logging.info('num items: {0}'.format(len(items_unique)))
logging.info('num raters: {0}'.format(len(raters_unique)))
logging.info('num classes: {0}'.format(len(classes_unique)))
# run EM
start = time.time()
class_marginals, error_rates, item_classes = run(
items_unique,
raters_unique,
classes_unique,
counts,
label,
FLAGS.pseudo_count,
tol=FLAGS.tolerance,
max_iter=FLAGS.max_iter)
end = time.time()
logging.info('training time: {0:.4f} seconds'.format(end - start))
# join comment_text, old labels and new labels
df_predictions = parse_item_classes(df, label, item_classes,
index_to_unit_id_map, index_to_y_map,
unit_id, worker_id, comment_text_path)
# join rater error_rates
df_error_rates = parse_error_rates(df, error_rates, index_to_worker_id_map,
index_to_y_map, unit_id, worker_id)
# write predictions and error_rates out as CSV
n = len(df)
prediction_path = '{0}/predictions_{1}_{2}.csv'.format(
FLAGS.job_dir, label, n)
error_rates_path = '{0}/error_rates_{1}_{2}.csv'.format(
FLAGS.job_dir, label, n)
logging.info('Writing predictions to {}'.format(prediction_path))
with tf.gfile.Open(prediction_path, 'w') as fileobj:
df_predictions.to_csv(fileobj, index=False, encoding='utf-8')
logging.info('Writing error rates to {}'.format(error_rates_path))
with tf.gfile.Open(error_rates_path, 'w') as fileobj:
df_error_rates.to_csv(fileobj, index=False, encoding='utf-8')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-path',
help='The path to data to run on, local or in Cloud Storage.')
parser.add_argument(
'--comment-text-path',
help='The path to comment text, local or in Cloud Storage.')
parser.add_argument(
'--worker-id-col', help='Column name of worker id.', default='_worker_id')
parser.add_argument(
'--unit-id-col', help='Column name of unit id.', default='_comment_id')
parser.add_argument(
'--n_examples',
help='The number of annotations to use.',
default=10000000,
type=int)
parser.add_argument(
'--label',
help='The label to train on, e.g. "obscene" or "threat"',
default='obscene')
parser.add_argument(
'--job-dir',
type=str,
default='',
help='The directory where the job is staged.')
parser.add_argument(
'--max-iter',
help='The max number of iteration to run.',
type=int,
default=25)
parser.add_argument(
'--pseudo-count',
help='The pseudo count to smooth error rates.',
type=float,
default=1.0)
parser.add_argument(
'--tolerance',
help='Stop training when variables change less than this value.',
type=int,
default=1)
FLAGS = parser.parse_args()
print('FLAGS', FLAGS)
main(FLAGS)
|
apache-2.0
|
brianwgoldman/LengthBiasCGP
|
util.py
|
1
|
3474
|
'''
Collection of utility functions with no other obvious home.
'''
from itertools import izip, cycle
import json
import os
def diff_count(data1, data2):
'''
    Count the number of differences between two sets of data
'''
return sum(x != y for x, y in izip(data1, data2))
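# For example, diff_count('abcd', 'abce') returns 1.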
def load_configurations(filenames, file_method=open):
'''
    Given a list of files containing json encoded dictionaries, combine
the data into a single dictionary.
Parameters:
- ``filenames``: The list of files paths.
- ``file_method``: The method to open the file with. Defaults to ``open``.
Can be used to read compressed configurations.
'''
result = {}
for filename in filenames:
with file_method(filename, 'r') as f:
result.update(json.load(f))
return result
def save_configuration(filename, data, file_method=open):
'''
Write a dictionary to the specified file in json format.
Parameters
- ``filename``: The path to write to.
- ``data``: The data to be written.
- ``file_method``: The method to open the file with. Defaults to ``open``.
Can be used to write compressed configurations.
'''
with file_method(filename, 'w') as f:
json.dump(data, f)
def save_list(filename, data, file_method=open):
'''
Write a list of dictionaries to the file in a more human readable way.
Parameters
- ``filename``: The path to write to.
- ``data``: The list of dictionaries to be written.
- ``file_method``: The method to open the file with. Defaults to ``open``.
Can be used to write compressed configurations.
'''
with file_method(filename, 'w') as f:
f.write('[' + os.linesep)
for lineNumber, line in enumerate(data):
json.dump(line, f)
if lineNumber != len(data) - 1:
f.write(",")
f.write(os.linesep)
f.write(']' + os.linesep)
def find_median(data):
'''
Returns the median of the data.
'''
ordered = sorted(data)
length = len(data)
middle = length // 2
if length % 2 == 1:
return ordered[middle]
else:
return (ordered[middle] + ordered[middle - 1]) / 2.0
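# For example, find_median([3, 1, 2]) returns 2 and
# find_median([4, 1, 2, 3]) returns 2.5.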
def median_deviation(data, median=None):
'''
Returns the median and the median absolute deviation of the data.
Parameters:
- ``data``: The data to find the medians of.
- ``median``: If the median is already known you can pass it in to save
time.
'''
if median is None:
median = find_median(data)
return median, find_median([abs(x - median) for x in data])
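# For example, median_deviation([1, 2, 3, 4, 100]) returns (3, 1): the median
# is 3 and the absolute deviations [2, 1, 0, 1, 97] have median 1.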
def set_fonts():
'''
Configures matplotlib to use only Type 1 fonts, and sets the figure size
such that those fonts will be legible when the figure is inserted in
a publication.
'''
import matplotlib
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.pyplot.figure(figsize=(7, 5))
# Generator used when plotting to cycle through the different line styles
linecycler = cycle(["-", "--", "-.", ":"])
# Dictionary converter from original name to name used in paper
pretty_name = {"normal": "Normal",
"reorder": "Reorder",
"dag": "DAG", }
# Specifies what order lines should appear in graphs
line_order = {'normal': 1,
'reorder': 2,
'dag': 3,
}
|
bsd-2-clause
|
ChristopheVuillot/qiskit-sdk-py
|
qiskit/tools/qcvv/fitters.py
|
1
|
4730
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Basic plotting methods using matplotlib.
These include methods to plot Bloch vectors, histograms, and quantum spheres.
"""
import matplotlib.pyplot as plt
import numpy as np
# function used to fit the exponential decay
def exp_fit_fun(x, a, tau, c):
return a * np.exp(-x/tau) + c
# function used to fit the decay cosine
def osc_fit_fun(x, a, tau, f, phi, c):
return a * np.exp(-x/tau)*np.cos(2*np.pi*f*x+phi) + c
# function used to fit rb
def rb_fit_fun(x, a, alpha, b):
return a * alpha**x + b
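# Minimal sketch of assumed usage with a least-squares fitter (hypothetical
# data; not part of this module):
#   from scipy.optimize import curve_fit
#   import numpy as np
#   xdata = np.linspace(0, 200, 21)                 # delay times
#   ydata = exp_fit_fun(xdata, 1.0, 50.0, 0.0)      # would be measured P(1) values
#   popt, pcov = curve_fit(exp_fit_fun, xdata, ydata, p0=[1.0, 50.0, 0.0])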
# Functions used by randomized benchmarking.
def plot_coherence(xdata, ydata, std_error, fit, fit_function, xunit, exp_str,
qubit_label):
"""Plot coherence data.
    Args:
        xdata: x values of the coherence data (e.g. delay times)
        ydata: measured P(1) values
        std_error: standard error on ydata, used for the error bars
        fit: fitted parameters, unpacked into fit_function
        fit_function: model function used to draw the fit curve
        xunit: unit string for the x axis
        exp_str: name of the experiment, used in the title and legend
        qubit_label: label of the measured qubit
    """
plt.errorbar(xdata, ydata, std_error, marker='.',
markersize=9, c='b', linestyle='')
plt.plot(xdata, fit_function(xdata, *fit), c='r', linestyle='--',
label=(exp_str + '= %s %s' % (str(round(fit[1])), xunit)))
plt.xticks(fontsize=14, rotation=70)
plt.yticks(fontsize=14)
plt.xlabel('time [%s]' % (xunit), fontsize=16)
plt.ylabel('P(1)', fontsize=16)
plt.title(exp_str + ' measurement of Q$_{%s}$' % (str(qubit_label)), fontsize=18)
plt.legend(fontsize=12)
plt.grid(True)
plt.show()
def shape_rb_data(raw_rb):
"""Take the raw rb data and convert it into averages and std dev
Args
raw_rb = m x n x l list where m is the number of seeds, n is the number of Clifford sequences and
l is the number of qubits
Return:
rb_data: 2 x n x l list where index 0 is the mean over seeds, 1 is the std dev over seeds
"""
rb_data = []
rb_data.append(np.mean(raw_rb,0))
rb_data.append(np.std(raw_rb,0))
return rb_data
def rb_epc(fit,rb_pattern):
"""Take the rb fit data and convert it into EPC (error per Clifford)
Args
    fit = dictionary of the fit quantities (A,alpha,B) with the keys 'qn' where n is the qubit
    and subkeys 'fit', e.g. {'q0':{'fit': [1,0,0.9],'fiterr': [0,0,0]}}
rb_pattern = (see randomized benchmarking functions). Pattern which specifies which qubits
    are performing RB with which qubits. E.g. [[1],[0,2]] is Q1 doing 1Q RB simultaneously with
Q0/Q2 doing 2Q RB
Return:
fit: updates the passed in fit dictionary with the epc
"""
for patterns in rb_pattern:
for qubit in patterns:
fitalpha = fit['q%d'%qubit]['fit'][1]
fitalphaerr = fit['q%d'%qubit]['fiterr'][1]
nrb = 2**len(patterns)
fit['q%d'%qubit]['fit_calcs'] = {}
fit['q%d'%qubit]['fit_calcs']['epc'] = [(nrb-1)/nrb*(1-fitalpha),fitalphaerr/fitalpha]
fit['q%d'%qubit]['fit_calcs']['epc'][1] *= fit['q%d'%qubit]['fit_calcs']['epc'][0]
return fit
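# Worked example (illustrative): for a one-qubit pattern, nrb = 2, so a fitted
# alpha of 0.99 gives an error per Clifford of (2 - 1) / 2 * (1 - 0.99) = 0.005.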
def plot_rb_data(xdata, ydatas, yavg, yerr, fit, survival_prob, ax=None, show_plt=1):
"""Plot randomized benchmarking data.
xdata = list of subsequence lengths
ydatas = list of lists of survival probabilities for each sequence
yavg = mean of the survival probabilities at each sequence length
yerr = error of the survival
fit = fit parameters
survival_prob = function that computes survival probability
ax: plot axis (if passed in)
"""
if ax is None:
plt.figure()
ax = plt.gca()
# Plot the result for each sequence
for ydata in ydatas:
ax.plot(xdata, ydata, color='gray', linestyle='none', marker='x')
# Plot the mean with error bars
ax.errorbar(xdata, yavg, yerr=yerr, color='r', linestyle='--', linewidth=3)
# Plot the fit
ax.plot(xdata, survival_prob(xdata, *fit), color='blue', linestyle='-', linewidth=2)
ax.tick_params(labelsize=14)
#ax.tick_params(axis='x',labelrotation=70)
ax.set_xlabel('Clifford Length', fontsize=16)
ax.set_ylabel('Z', fontsize=16)
ax.grid(True)
if show_plt:
plt.show()
|
apache-2.0
|
lewisc/spark-tk
|
regression-tests/sparktkregtests/testcases/dicom/dicom_svd_test.py
|
11
|
2573
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests svd on dicom frame"""
import unittest
from sparktk import dtypes
from sparktkregtests.lib import sparktk_test
from numpy.linalg import svd
from numpy.testing import assert_almost_equal
class SVDDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(SVDDicomTest, self).setUp()
dataset = self.get_file("dicom_uncompressed")
dicom = self.context.dicom.import_dcm(dataset)
self.frame = dicom.pixeldata
def test_svd(self):
"""Test the output of svd"""
self.frame.matrix_svd("imagematrix")
#get pandas frame of the output
results = self.frame.to_pandas(self.frame.count())
#compare U,V and s matrices for each image against numpy's output
for i, row in results.iterrows():
actual_U = row['U_imagematrix']
actual_V = row['Vt_imagematrix']
actual_s = row['SingularVectors_imagematrix']
            # expected output using numpy's svd
U, s, V = svd(row['imagematrix'])
assert_almost_equal(actual_U, U, decimal=4, err_msg="U incorrect")
assert_almost_equal(actual_V, V, decimal=4, err_msg="V incorrect")
assert_almost_equal(
                actual_s[0], s, decimal=4, err_msg="Singular vectors incorrect")
def test_invalid_column_name(self):
"""Test behavior for invalid column name"""
with self.assertRaisesRegexp(
Exception, "column ERR was not found"):
self.frame.matrix_svd("ERR")
def test_invalid_param(self):
"""Test behavior for invalid parameter"""
with self.assertRaisesRegexp(
Exception, "svd\(\) takes exactly 2 arguments"):
self.frame.matrix_svd("imagematrix", True)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/lib/mpl_examples/api/collections_demo.py
|
3
|
4171
|
#!/usr/bin/env python
'''Demonstration of LineCollection, PolyCollection, and
RegularPolyCollection with autoscaling.
For the first two subplots, we will use spirals. Their
size will be set in plot units, not data units. Their positions
will be set in data units by using the "offsets" and "transOffset"
kwargs of the LineCollection and PolyCollection.
The third subplot will make regular polygons, with the same
type of scaling and positioning as in the first two.
The last subplot illustrates the use of "offsets=(xo,yo)",
that is, a single tuple instead of a list of tuples, to generate
successively offset curves, with the offset given in data
units. This behavior is available only for the LineCollection.
'''
import matplotlib.pyplot as P
from matplotlib import collections, axes, transforms
from matplotlib.colors import colorConverter
import numpy as N
nverts = 50
npts = 100
# Make some spirals
r = N.array(range(nverts))
theta = N.array(range(nverts)) * (2*N.pi)/(nverts-1)
xx = r * N.sin(theta)
yy = r * N.cos(theta)
spiral = zip(xx,yy)
# Make some offsets
rs = N.random.RandomState([12345678])
xo = rs.randn(npts)
yo = rs.randn(npts)
xyo = zip(xo, yo)
# Make a list of colors cycling through the rgbcmyk series.
colors = [colorConverter.to_rgba(c) for c in ('r','g','b','c','y','m','k')]
fig = P.figure()
a = fig.add_subplot(2,2,1)
col = collections.LineCollection([spiral], offsets=xyo,
transOffset=a.transData)
trans = fig.dpi_scale_trans + transforms.Affine2D().scale(1.0/72.0)
col.set_transform(trans) # the points to pixels transform
# Note: the first argument to the collection initializer
# must be a list of sequences of x,y tuples; we have only
# one sequence, but we still have to put it in a list.
a.add_collection(col, autolim=True)
# autolim=True enables autoscaling. For collections with
# offsets like this, it is neither efficient nor accurate,
# but it is good enough to generate a plot that you can use
# as a starting point. If you know beforehand the range of
# x and y that you want to show, it is better to set them
# explicitly, leave out the autolim kwarg (or set it to False),
# and omit the 'a.autoscale_view()' call below.
# Make a transform for the line segments such that their size is
# given in points:
col.set_color(colors)
a.autoscale_view() # See comment above, after a.add_collection.
a.set_title('LineCollection using offsets')
# The same data as above, but fill the curves.
a = fig.add_subplot(2,2,2)
col = collections.PolyCollection([spiral], offsets=xyo,
transOffset=a.transData)
trans = transforms.Affine2D().scale(fig.dpi/72.0)
col.set_transform(trans) # the points to pixels transform
a.add_collection(col, autolim=True)
col.set_color(colors)
a.autoscale_view()
a.set_title('PolyCollection using offsets')
# 7-sided regular polygons
a = fig.add_subplot(2,2,3)
col = collections.RegularPolyCollection(7,
sizes = N.fabs(xx)*10.0, offsets=xyo,
transOffset=a.transData)
trans = transforms.Affine2D().scale(fig.dpi/72.0)
col.set_transform(trans) # the points to pixels transform
a.add_collection(col, autolim=True)
col.set_color(colors)
a.autoscale_view()
a.set_title('RegularPolyCollection using offsets')
# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
a = fig.add_subplot(2,2,4)
nverts = 60
ncurves = 20
offs = (0.1, 0.0)
yy = N.linspace(0, 2*N.pi, nverts)
ym = N.amax(yy)
xx = (0.2 + (ym-yy)/ym)**2 * N.cos(yy-0.4) * 0.5
segs = []
for i in range(ncurves):
xxx = xx + 0.02*rs.randn(nverts)
curve = zip(xxx, yy*100)
segs.append(curve)
col = collections.LineCollection(segs, offsets=offs)
a.add_collection(col, autolim=True)
col.set_color(colors)
a.autoscale_view()
a.set_title('Successive data offsets')
a.set_xlabel('Zonal velocity component (m/s)')
a.set_ylabel('Depth (m)')
# Reverse the y-axis so depth increases downward
a.set_ylim(a.get_ylim()[::-1])
P.show()
|
gpl-2.0
|
ctogle/grapeipm_support
|
wu_locate.py
|
1
|
5419
|
#!/usr/bin/python2.7
import argparse,contextlib,io,sys,os,json,time,pdb
import matplotlib.pyplot as plt
import wu_gather
baseurl_lonlat = 'http://api.wunderground.com/api/%s/geolookup/q/%s,%s.json'
make_url_lonlat = lambda u,x,y : baseurl_lonlat % (u,x,y)
# added this function because it looks like Wunderground defies convention and has lat first
# Converts params given as x,y to y,x
make_url_latlon = lambda u,x,y : baseurl_lonlat % (u,x,y)
baseurl_state = 'http://api.wunderground.com/api/%s/geolookup/q/%s.json'
make_url_state = lambda u,s : baseurl_state % (u,s)
locstring = lambda slocs : '|'.join([k+':'+','.join(slocs[k]) for k in slocs])
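# For example (hypothetical station set), locstring({'NC': ['Raleigh'],
# 'PWS': ['KNCRALEI42']}) yields 'NC:Raleigh|PWS:KNCRALEI42' (up to dict
# ordering), which is the -l location string consumed by wu_gather.py.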
def plot_stationpoints(pts,lon,lat,bndf = 10.0):
print('Calling fn plot_stationpoints()')
'''Useful function for plotting the locations of stations around a lon,lat point.'''
print('... found %d nearby stations to lon,lat %s,%s ...' % (len(pts),lon,lat))
xs,ys = zip(*pts)
xmin,xmax = min(xs),max(xs)
ymin,ymax = min(ys),max(ys)
xrng,yrng = xmax-xmin,ymax-ymin
plt.xlim((min(xs)-xrng/bndf,max(xs)+xrng/bndf))
plt.ylim((min(ys)-yrng/bndf,max(ys)+yrng/bndf))
plt.plot([lon],[lat],marker = '*',color = 'red')
for p in pts[1:]:plt.plot([p[0]],[p[1]],marker = 'o',color = 'g')
plt.show()
@contextlib.contextmanager
def nostdout():
print('Calling fn nostdout()')
    '''Context manager that suppresses stdout.'''
save_stdout = sys.stdout
sys.stdout = io.BytesIO()
yield
sys.stdout = save_stdout
def query(url,outpath):
print('Calling fn query(%s, %s)' % (url, outpath))
'''Download json file from url, save at output, and return associated data.'''
if wu_gather.urlfetch(url,outpath):lastcall = time.time()
with open(outpath) as f:data = json.load(f)
return data
def lonlat(cache,apikey,lon,lat):
print('Calling fn lonlat()')
'''Fetch list of up to 50 station locations within 40 km of a longitude,latitude.
"The nearby Personal Weather Stations returned in the feed represent the closest
stations within a 40km radius, with a max number of stations returned of 50."'''
outpath = wu_gather.make_outpath(cache,'LonLat_'+str(lon),str(lat),'X')
url = make_url_latlon(apikey,lat,lon)
data = query(url,outpath)
pts = []
stationlocs = {}
nearby = data['location']['nearby_weather_stations']
for stationtype in nearby:
nearbystations = nearby[stationtype]['station']
for station in nearbystations:
            # lon and lat are switched, as wunderground seems to be in error...
#pts.append((float(station['lon']),float(station['lat'])))
pts.append((float(station['lat']),float(station['lon'])))
if stationtype == 'airport':sloc = station['state'],station['city']
elif stationtype == 'pws':sloc = 'PWS',station['id']
else:
emsg = '... stationtype %s is not supported ...'
                # this won't print on stdout with nostdout()...
raise ValueError(emsg % stationtype)
if sloc[0] in stationlocs:stationlocs[sloc[0]].append(sloc[1])
else:stationlocs[sloc[0]] = [sloc[1]]
#plot_stationpoints(pts,float(lon),float(lat))
return stationlocs
def state(cache,apikey,state):
print('Calling fn state()')
'''Fetch state wide list of station locations (one per city).'''
outpath = wu_gather.make_outpath(cache,state,'X','X')
url = make_url_state(apikey,state)
print('... Searching %s with key %s, final URL: %s' % (state,apikey,url))
data = query(url,outpath)
stationlocs = {}
print(data)
for r in data['response']['results']:
state,city = r['state'],r['city']
if state in stationlocs:stationlocs[state].append(city)
else:stationlocs[state] = [city]
return stationlocs
if __name__ == '__main__':
'''
This can be used with wu_gather.py:
./wu_gather.py [wu_gather.py OPTIONS] -l "`./wu_locate.py [wu_locate.py OPTIONS]`"
    stdout is suppressed aside from the resulting valid -l option for wu_gather.py.
    NOTE: the suppressed output generally includes errors resulting from invalid input to wu_locate.py ...
'''
parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('configfile',
help = 'specify a parsing configuration file')
parser.add_argument('--state',default = None,
help = 'specify a state')
parser.add_argument('--longitude',default = None,
help = 'specify a longitude coordinate')
parser.add_argument('--latitude',default = None,
help = 'specify a latitude coordinate')
parser.add_argument('-u','--apikey',
help = 'specify a user api key for weather underground')
parser.add_argument('-d','--cache',
help = 'specify a directory to store raw weather underground data')
cfg = wu_gather.parse_config(parser.parse_args())
if not os.path.exists(cfg.cache):os.mkdir(cfg.cache)
if cfg.state:
#with nostdout():
stationlocs = state(cfg.cache,cfg.apikey,cfg.state)
elif cfg.longitude and cfg.latitude:
#with nostdout():
stationlocs = lonlat(cfg.cache,cfg.apikey,cfg.longitude,cfg.latitude)
else:
emsg = '... need either --state option or --longitude and --latitude options ...'
raise ValueError(emsg)
print(locstring(stationlocs))
|
mit
|
liangz0707/scikit-learn
|
examples/mixture/plot_gmm_selection.py
|
248
|
3223
|
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
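# The docstring above notes that AIC would select the same model but is not
# shown to save time. A minimal sketch of that check could reuse the same grid;
# this assumes mixture.GMM exposes aic() alongside the bic() used above.
aic = []
for cv_type in cv_types:
    for n_components in n_components_range:
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        aic.append(gmm.aic(X))
aic = np.array(aic)
print('AIC and BIC pick the same model:', aic.argmin() == bic.argmin())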
|
bsd-3-clause
|
bkj/wit
|
wit/dep/forum-example.py
|
2
|
2418
|
import pandas as pd
import urllib2
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
# --
# Config + Init
num_features = 500 # Character
max_len = 250 # Character
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
in_store = pd.HDFStore('/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5',complevel=9, complib='bzip2')
source = in_store.keys()[1]
df = in_store[source]
in_store.close()
# Subset to frequent paths
chash = df.hash.value_counts()
keep = list(chash[chash > 100].index)
df = df[df.hash.apply(lambda x: x in keep)]
# --
# Training
pairwise_train = PairwiseData(df)
train = pairwise_train.make_strat(n_pos = 50, neg_prop = .001) # Downsampling negative examples, otherwise negative set is very large
# Format for keras training
trn, levs = formatter.format_symmetric(train, ['obj1', 'obj2'], 'match')
# Compile and train classifier
classifier = SiameseClassifier(trn, levs)
classifier.fit(nb_epoch = 15, batch_size = 128)
preds = classifier.predict(trn['x'])[:,1]
preds.shape = (preds.shape[0], )
assert(preds.shape[0] == (train.shape[0] * 2))
train['preds'] = preds[:train.shape[0]]
self_sims, sims = make_self_sims(train)
self_sims
# -- Application
# Predict on random pairs of entries
test = strat_pairs(df, n_nonmatch = 20, n_match = 20)
tst, levs = formatter.format_symmetric(test, ['obj1', 'obj2'], 'match')
preds = classifier.predict(tst['x'])[:,1]
preds.shape = (preds.shape[0], )
assert(preds.shape[0] == (test.shape[0] * 2))
test['preds'] = preds[:test.shape[0]]
# Examining results
self_sims, sims = make_self_sims(test)
# Column Equivalency classes
sims[sims.sim > .8]
equivs, uequivs = make_equiv(test, THRESH = .9)
eqv = uequivs.values()
print_eqv(eqv)
# --
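# Inspect intermediate activations of the two Siamese branches with Theano.
# NOTE: `model` below is never defined in this snippet; it is assumed to be the
# underlying Keras model built by SiameseClassifier (e.g. classifier.model),
# so this section will not run as-is.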
import theano
tst['x'][1] = tst['x'][0]
preds = classifier.predict(tst['x'])
glo1 = theano.function([model.layers[0].layers[0].layers[0].input],
model.layers[0].layers[0].layers[2].get_output(train=False))
glo2 = theano.function([model.layers[0].layers[1].layers[0].input],
model.layers[0].layers[1].layers[2].get_output(train=False))
lo1 = glo1(tst['x'][0][0:10])
lo2 = glo2(tst['x'][0][0:10])
sum(lo1[0] * lo1[0])
|
apache-2.0
|
apache/spark
|
python/pyspark/pandas/data_type_ops/num_ops.py
|
6
|
21133
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import cast, Any, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.base import column_op, IndexOpsMixin, numpy_column_op
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import (
BooleanType,
StringType,
TimestampType,
)
class NumericOps(DataTypeOps):
"""The class for binary operations of numeric pandas-on-Spark objects."""
@property
def pretty_name(self) -> str:
return "numerics"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__add__)(left, right)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__sub__)(left, right)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("modulo can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
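        # ((a % b) + b) % b reproduces Python/pandas modulo semantics, where the
        # result takes the sign of the divisor, rather than SQL's plain remainder.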
def mod(left: Column, right: Any) -> Column:
return ((left % right) + right) % right
return column_op(mod)(left, right)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("exponentiation can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
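        # NumPy and pandas define 1 ** x == 1 even when x is NaN, so a base of
        # exactly 1 is special-cased before delegating to Spark's power operator.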
def pow_func(left: Column, right: Any) -> Column:
return F.when(left == 1, left).otherwise(Column.__pow__(left, right))
return column_op(pow_func)(left, right)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__radd__)(left, right)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rsub__)(left, right)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if not isinstance(right, numbers.Number):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rmul__)(left, right)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("exponentiation can not be applied to given types.")
def rpow_func(left: Column, right: Any) -> Column:
return F.when(SF.lit(right == 1), right).otherwise(Column.__rpow__(left, right))
right = transform_boolean_operand_to_numeric(right)
return column_op(rpow_func)(left, right)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("modulo can not be applied to given types.")
def rmod(left: Column, right: Any) -> Column:
return ((right % left) + left) % left
right = transform_boolean_operand_to_numeric(right)
return column_op(rmod)(left, right)
# TODO(SPARK-36003): Implement unary operator `invert` as below
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise NotImplementedError("Unary ~ can not be applied to %s." % self.pretty_name)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
from pyspark.pandas.base import column_op
return cast(IndexOpsLike, column_op(Column.__neg__)(operand))
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
from pyspark.pandas.base import column_op
return cast(IndexOpsLike, column_op(F.abs)(operand))
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__lt__)(left, right)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__le__)(left, right)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__ge__)(left, right)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__gt__)(left, right)
class IntegralOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
LongType, IntegerType, ByteType and ShortType.
"""
@property
def pretty_name(self) -> str:
return "integrals"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType):
return column_op(SF.repeat)(right, left)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
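        # Zero divisors follow NumPy-style semantics: positive / 0 -> inf,
        # negative / 0 -> -inf, and 0 / 0 ends up as a missing value; all three
        # cases are produced by evaluating inf / left when right is zero.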
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(SF.lit(np.inf).__div__(left))
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(SF.lit(np.inf).__div__(left))
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.floor(SF.lit(right).__div__(left))
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class FractionalOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
FloatType, DoubleType.
"""
@property
def pretty_name(self) -> str:
return "fractions"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
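        # Same zero-divisor handling as the integral case, except that an
        # already infinite numerator (+/-inf) is passed through unchanged.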
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.when(SF.lit(left) == np.nan, np.nan).otherwise(
F.floor(SF.lit(right).__div__(left))
)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
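        # For float columns both SQL NULL and NaN count as missing values,
        # matching pandas' isna semantics.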
return index_ops._with_new_scol(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
SF.lit(True),
).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class DecimalOps(FractionalOps):
"""
The class for decimal operations of pandas-on-Spark objects with spark type:
DecimalType.
"""
@property
def pretty_name(self) -> str:
return "decimal"
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
    def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError(">= can not be applied to %s." % self.pretty_name)
    def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("> can not be applied to %s." % self.pretty_name)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
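        # DecimalType can not represent NaN, so only SQL NULL marks a missing value.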
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class IntegralExtensionOps(IntegralOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
LongType, IntegerType, ByteType and ShortType
- dtypes:
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
"""
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
class FractionalExtensionOps(FractionalOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
FloatType, DoubleType and DecimalType
- dtypes:
Float32Dtype, Float64Dtype
"""
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
|
apache-2.0
|
cjhak/b2share
|
invenio/legacy/webstat/engine.py
|
6
|
105916
|
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random, cgi
from operator import itemgetter
from invenio.config import CFG_TMPDIR, \
CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_BINDIR, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, \
CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL, \
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE, \
CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED
from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import CFG_JOURNAL_TAG
from invenio.utils.url import redirect_to_url
from invenio.legacy.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values, \
search_pattern
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.dbquery import run_sql, \
wash_table_column_name
from invenio.legacy.websubmit.admin_dblayer import get_docid_docname_alldoctypes
from invenio.legacy.bibcirculation.utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.legacy.bibcirculation.db_layer import get_id_bibrec, \
get_borrower_data
from invenio.legacy.websearch.webcoll import CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE
from invenio.utils.date import convert_datetext_to_datestruct, convert_datestruct_to_dategui
from invenio.legacy.bibsched.bibtask import get_modified_records_since
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args, return_sql=False):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
if args.get('collection', 'All') == 'All':
sql_query_g = _get_sql_query("creation_date", args['granularity'],
"bibrec")
sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
return_sql=return_sql, sql_text=
"Previous count: %s<br />Current count: %%s" % (sql_query_i),
acumulative=True)
else:
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
g = get_keyevent_trend_new_records(args, return_sql, True)
sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
if return_sql:
return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
return _get_trend_from_actions(g, initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative=True)
def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
"""
Returns the number of new records uploaded during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
"bibrec"),
return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
extra_select=", id", group_by=False, count=False)
if return_sql:
return sql % (lower, upper)
recs = run_sql(sql, (lower, upper))
if recs:
def add_count(i_list, element):
""" Reduce function to create a dictionary with the count of ids
for each date """
if i_list and element == i_list[-1][0]:
i_list[-1][1] += 1
else:
i_list.append([element, 1])
return i_list
action_dates = reduce(add_count,
map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
[])
else:
action_dates = []
if only_action:
return action_dates
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_frequency(args, return_sql=False):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query"),
return_sql=return_sql)
def get_keyevent_trend_comments_frequency(args, return_sql=False):
"""
Returns the number of comments (of any kind) carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT")
else:
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT", conditions=
_get_collection_recids_for_sql_query(args['collection']))
return _get_keyevent_trend(args, sql, return_sql=return_sql)
def get_keyevent_trend_search_type_distribution(args, return_sql=False):
"""
    Returns the number of searches carried out during the given
    timestamp range, and also partitions them by type: Simple and
    Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine all simple searches:
simple = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%p=%%'")
# SQL to determine all advanced searches:
advanced = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%as=1%%'")
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, simple,
return_sql=return_sql, sql_text="Simple: %s")
a_trend = _get_keyevent_trend(args, advanced,
return_sql=return_sql, sql_text="Advanced: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, a_trend)
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_trend_download_frequency(args, return_sql=False):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# Collect list of timestamps of insertion in the specific collection
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("download_time",
args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
extra_select=", GROUP_CONCAT(id_bibrec)")
if return_sql:
return sql % (lower, upper)
action_dates = []
for result in run_sql(sql, (lower, upper)):
count = result[1]
for id in result[2].split(","):
if id == '' or not int(id) in ids:
count -= 1
action_dates.append((result[0], count))
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args, return_sql=False):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("loaned_on",
args["granularity"], "crcLOAN"), return_sql=return_sql)
def get_keyevent_trend_web_submissions(args, return_sql=False):
"""
Returns the quantity of websubmissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args['doctype'] == 'all':
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, return_sql=return_sql)
else:
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="doctype=%s AND action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
return_sql=return_sql)
return res
def get_keyevent_loan_statistics(args, return_sql=False):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)"
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
# Number of loans:
loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
# Only the CERN site wants the items of the collection "Books & Proceedings"
if CFG_CERN_SITE:
items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
if items_in_book_coll == "":
total_items_sql = 0
else:
total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
items_in_book_coll
else: # The rest take all the items
total_items_sql = "SELECT COUNT(*) FROM crcITEM"
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
if return_sql:
return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
loaned on total number of items</li><li>%s</li></ol>" % \
(loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
loans = run_sql(loans_sql, param)[0][0]
items_loaned = run_sql(items_loaned_sql, param)[0][0]
if total_items_sql:
total_items = run_sql(total_items_sql)[0][0]
else:
total_items = 0
if total_items == 0:
loaned_on_total = 0
never_loaned_on_total = 0
else:
# Number of items loaned on the total number of items:
loaned_on_total = float(items_loaned) * 100 / float(total_items)
# Number of items never loaned on the total number of items:
never_loaned_on_total = 100L - loaned_on_total
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = float(avg)
else:
avg = 0L
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
def get_keyevent_loan_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loan']: minimum number of loans
@type args['min_loan']: int
@param args['max_loan']: maximum number of loans
@type args['max_loan']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_where = []
param = []
sql_from = ""
if 'udc' in args and args['udc'] != '':
sql_where.append("i." + _check_udc_value_where())
param.append(_get_udc_truncated(args['udc']))
if 'loan_period' in args and args['loan_period'] != '':
sql_where.append("loan_period = %s")
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)")
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br"
sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
param.append('%%%s%%' % args['creation_date'])
if sql_where:
sql_where = "WHERE %s AND" % " AND ".join(sql_where)
else:
sql_where = "WHERE"
param = tuple(param + [lower, upper])
# SQL for both queries
check_num_loans = "HAVING "
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "HAVING ":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
# Optimized to get all the data in only one query (not call get_fieldvalues several times)
mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
(sql_from, sql_where, check_num_loans)
limit_n = ""
if limit > 0:
limit_n = "LIMIT %d" % limit
nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
barcode NOT IN (SELECT id_bibrec FROM crcLOAN WHERE loaned_on > %%s AND \
loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
(sql_from, sql_where, limit_n)
items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
if return_sql:
return "Most loaned: %s<br \>Never loaned: %s" % \
(mldocs_sql % param, nldocs_sql % param)
mldocs = run_sql(mldocs_sql, param)
items = dict(run_sql(items_sql))
order_m = []
for mldoc in mldocs:
order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
float(mldoc[1]) / float(items[mldoc[0]])])
order_m = sorted(order_m, key=itemgetter(3))
order_m.reverse()
# Check limit values
if limit > 0:
order_m = order_m[:limit]
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
for mldoc in order_m:
res.append(("Most loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
mldoc[1], mldoc[2],
_check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
nldocs = run_sql(nldocs_sql, param)
for nldoc in nldocs:
res.append(("Not loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
0, items[nldoc[0]],
_check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
# nldocs = run_sql(nldocs_sql, param_n)
return (res)
def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most renewed items stored by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
% (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql(sql, param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args, return_sql=False):
"""
Data:
- Number of overdue returns in a timespan
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
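    # i.e. loans whose due date has already passed and which are either still
    # out (returned_on IS NULL) or were returned after the due date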
sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
if return_sql:
return sql % (lower, upper)
return ((run_sql(sql, (lower, upper))[0][0], ), )
def get_keyevent_trend_returns_percentage(args, return_sql=False):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine overdue returns:
overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL \
AND (returned_on IS NULL OR returned_on > due_date)",
dates_range_param="loaned_on")
# SQL to determine all returns:
total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL",
dates_range_param="loaned_on")
# Compute the trend for both types
o_trend = _get_keyevent_trend(args, overdue,
return_sql=return_sql, sql_text="Overdue: %s")
t_trend = _get_keyevent_trend(args, total,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (o_trend, t_trend)
return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
for i in range(len(o_trend))]
def get_keyevent_ill_requests_statistics(args, return_sql=False):
"""
Data:
- Number of ILL requests
    - Number of ILL requests satisfied within 2 weeks of the request
    creation date, over a timespan
    - Average time between the date and hour of the ILL request
    and the date and hour of delivery of the item to the user,
    over a timespan
    - Average time between the date and hour the ILL request
    was sent to the supplier and the date and hour of delivery
    of the item, over a timespan
Filter by
- in a specified time span
- by type of document (book or article)
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
satrequests_sql = "SELECT COUNT(*) %s %s \
AND arrival_date IS NOT NULL AND \
DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
AND arrival_date IS NOT NULL" % (sql_from, sql_where)
avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
AND arrival_date IS NOT NULL \
AND request_date IS NOT NULL" % (sql_from, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(requests_sql % param, satrequests_sql % param,
avgdel_sql % param, avgsup_sql % param)
# Number of requests:
requests = run_sql(requests_sql, param)[0][0]
# Number of satisfied ILL requests 2 weeks after the date of request creation:
satrequests = run_sql(satrequests_sql, param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql(avgdel_sql, param)[0][0]
if avgdel:
avgdel = float(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql(avgsup_sql, param)[0][0]
if avgsup:
avgsup = float(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE status != '%s' AND request_date > %%s AND request_date < %%s " \
% CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s "
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Id", "Title", "Author", "Edition")]
for req_id, item_info in run_sql(sql, param):
item_info = eval(item_info)
try:
res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
"""
    Returns the number of ILL requests satisfied within 2 weeks of the request
    creation date, and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
sql_from = "crcILLREQUEST ill "
sql_where = ""
param = []
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() AND \
(arrival_date IS NULL OR arrival_date < ADDDATE(request_date, 14)) " + sql_where)
# SQL to determine all ILL requests:
total = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
return_sql=return_sql, sql_text="Satisfied: %s")
t_trend = _get_keyevent_trend(args, total, extra_param=param,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, t_trend)
return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args, return_sql=False):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
# Number of new items:
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
if return_sql:
return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
def get_keyevent_items_lists(args, return_sql=False, limit=50):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
@type args[library'']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
if len(param) == 0:
sqlres = run_sql(sql)
else:
sqlres = run_sql(sql, tuple(param))
sql = sql % param
if return_sql:
return sql
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args, return_sql=False):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
param = tuple(param)
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
    # Number of successful hold request transactions
succesful_holds = "SELECT COUNT(*) %s %s AND lr.status='%s'" % (sql_from, sql_where,
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE)
    # Average time, within a year, between the hold request date and the document delivery date
avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(holds % param, succesful_holds % param, avg_sql % param)
avg = run_sql(avg_sql, param)[0][0]
    if avg is not None:
        avg = int(avg)
    else:
        avg = 0
return ((run_sql(holds, param)[0][0], ),
(run_sql(succesful_holds, param)[0][0], ), (avg, ))
def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT lr.barcode %s %s GROUP BY barcode \
ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
if return_sql:
return sql
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql(sql, param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args, return_sql=False):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
param = (lower, upper, lower, upper)
# Total number of active users:
users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
UNION (SELECT id_crcBORROWER user %s %s)) res" % \
(sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
if return_sql:
return users % param
return ((run_sql(users, param)[0][0], ), )
def get_keyevent_user_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = (lower, upper, lower, upper)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
%s" % (limit)
if return_sql:
return sql % param
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql(sql, param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
return _run_cmd('uptime').strip().replace(' ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
    @return: The number of Apache processes (root + children), as reported by 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals = run_sql("SELECT COUNT(*), \
SUM(number_of_renewals) \
FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
def get_last_updates():
"""
    List date/time when the last updates were done (easy reading format).
    @return: last indexing, last ranking, last sorting, last collection update (webcoll)
@type: (datetime, datetime, datetime, datetime)
"""
try:
last_index = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM idxINDEX WHERE \
name="global"')[0][0])))
last_rank = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM rnkMETHOD ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
last_sort = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM bsrMETHODDATA ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(file_coll_last_update.read())))
file_coll_last_update.close()
# database not filled
except IndexError:
return ("", "", "", "")
return (last_index, last_rank, last_sort, last_coll)
def get_list_link(process, category=None):
"""
Builds the link for the list of records not indexed, ranked, sorted or
collected.
@param process: kind of process the records are waiting for (index, rank,
sort, collect)
@type process: str
@param category: specific sub-category of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount)
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type category: str
@return: link text
@type: string
"""
if process == "index":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name=%s)', (category,))
elif process == "rank":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
rnkMETHOD WHERE name=%s)', (category,))
elif process == "sort":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
bsrMETHODDATA WHERE id_bsrMETHOD=(SELECT id \
FROM bsrMETHOD WHERE name=%s))', (category,))
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
coll_last_update = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = zip(get_modified_records_since(coll_last_update).tolist())
# build the link
if len(list_registers) == 0:
return "Up to date"
link = '<a href="' + CFG_SITE_URL + '/search?p='
for register in list_registers:
link += 'recid%3A' + str(register[0]) + '+or+'
# delete the last '+or+'
link = link[:len(link)-4]
link += '">' + str(len(list_registers)) + '</a>'
return link
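# Illustrative note (not from the original module): for two pending record ids,
# say 1 and 2, get_list_link() builds a link of the form
#   <a href="<CFG_SITE_URL>/search?p=recid%3A1+or+recid%3A2">2</a>
# i.e. the anchor text is the number of pending records and the query ORs their recids.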
def get_search_link(record_id):
"""
    Auxiliary function that builds the direct link for a given record.
@param record_id: record's id number
@type record_id: int
@return: link text
@type: string
"""
link = '<a href="' + CFG_SITE_URL + '/record/' + \
str(record_id) + '">Record [' + str(record_id) + ']</a>'
return link
def get_ingestion_matching_records(request=None, limit=25):
"""
    Fetches all the records matching a given pattern, arranges them by last
    modification date and returns a list.
@param request: requested pattern to match
@type request: str
@return: list of records matching a pattern,
(0,) if no request,
(-1,) if the request was invalid
@type: list
"""
    if request is None or request == "":
return (0,)
try:
records = list(search_pattern(p=request))
except:
return (-1,)
if records == []:
return records
# order by most recent modification date
query = 'SELECT id FROM bibrec WHERE '
for r in records:
query += 'id="' + str(r) + '" OR '
query = query[:len(query)-4]
query += ' ORDER BY modification_date DESC LIMIT %s'
list_records = run_sql(query, (limit,))
final_list = []
for lr in list_records:
final_list.append(lr[0])
return final_list
def get_record_ingestion_status(record_id):
"""
    Returns the number of ingestion methods not yet applied to a given record.
If 0, the record is up to date.
@param record_id: record id number
@type record_id: int
@return: number of methods not updated for the record
@type: int
"""
counter = 0
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")', (record_id, ))[0][0]
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
rnkMETHOD ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
    counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
bsrMETHODDATA ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = file_coll_last_update.read()
file_coll_last_update.close()
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND \
modification_date >\
%s', (record_id, last_coll,))[0][0]
return counter
def get_specific_ingestion_status(record_id, process, method=None):
"""
Returns whether a record is or not up to date for a given
process and method.
@param record_id: identification number of the record
@type record_id: int
@param process: kind of process the records may be waiting for (index,
rank, sort, collect)
@type process: str
@param method: specific sub-method of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
    @type method: str
@return: text: None if the record is up to date
Last time the method was updated if it is waiting
@type: date/time string
"""
exist = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s', (record_id, ))
if exist[0][0] == 0:
return "REG not in DB"
if process == "index":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM idxINDEX WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM idxINDEX WHERE \
name=%s', (method,))[0][0]
elif process == "rank":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM rnkMETHOD WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM rnkMETHOD WHERE \
name=%s', (method,))[0][0]
elif process == "sort":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s))', (record_id, method,))
last_time = run_sql ('SELECT last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s)', (method,))[0][0]
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_time = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s \
AND modification_date > %s',
(record_id, last_time,))
    # no results means the record is up to date
if list_registers[0][0] == 0:
return None
else:
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(last_time)))
def get_title_ingestion(record_id, last_modification):
"""
    Auxiliary function that builds a direct link for a given record, with its last
    modification date.
@param record_id: id number of the record
@type record_id: string
@param last_modification: date/time of the last modification
@type last_modification: string
@return: link text
@type: string
"""
return '<h3><a href="%s/record/%s">Record [%s] last modification: %s</a></h3>' \
% (CFG_SITE_URL, record_id, record_id, last_modification)
def get_record_last_modification (record_id):
"""
Returns the date/time of the last modification made to a given record.
@param record_id: id number of the record
@type record_id: int
@return: date/time of the last modification
@type: string
"""
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT modification_date FROM bibrec \
WHERE id=%s', (record_id,))[0][0])))
def get_general_status():
"""
    Returns an approximate number of ingestion processes not yet applied to new or
    updated records, using the "global" category.
@return: number of processes not updated
@type: int
"""
return run_sql('SELECT COUNT(*) FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")')[0][0]
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content they must match; if it does not
                      exist or is empty, all columns are included
    @type args['cols']: [ [ str, str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
where = []
sql_param = [lower, upper]
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "" or not where:
where.append(wash_table_column_name(col_title))
elif col_bool == "and":
where.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
where.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
where.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
where.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
def get_customevent_dump(args):
"""
    Similar to a get_event_trend implementation, but no refining (i.e. frequency
    handling) is carried out whatsoever. This is just a dump.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and the content they must match; if it does not
                      exist or is empty, all columns are included
    @type args['cols']: [ [ str, str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
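# Rough sketch of the rows returned by get_customevent_dump() above (the event name
# and values are hypothetical):
#   ('baskets', '2009-03-01 12:00:00', 'action: display', 'basket_id: 42')
# i.e. (event id, creation time, 'column: value', ...), sorted by creation time.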
def get_customevent_table(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = CFG_JOURNAL_TAG.replace("%", "p")
# First get records of the year
recids = perform_request_search(p=query, of="id", wl=0)
# Then return list by tag
pub = get_most_popular_field_values(recids, tag)
if len(pub) == 0:
return []
if CFG_CERN_SITE:
total = sum([x[1] for x in pub])
else:
others = 0
total = 0
first_other = -1
for elem in pub:
total += elem[1]
if elem[1] < 2:
if first_other == -1:
first_other = pub.index(elem)
others += elem[1]
del pub[first_other:]
if others != 0:
pub.append(('Others', others))
pub.append(('TOTAL', total))
return pub
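# Hedged example (hypothetical values) of the aggregation above on a non-CERN site:
# if get_most_popular_field_values() returned [('A', 5), ('B', 3), ('C', 1), ('D', 1)],
# the entries with fewer than 2 records are folded together and the function returns
# [('A', 5), ('B', 3), ('Others', 2), ('TOTAL', 10)].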
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return False
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
from invenio.errorlib import register_exception
register_exception()
return False
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = len(labels)
width = 6 + float(numb_elem) / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
for i in range(numb_elem):
col = 0.5 + float(i) / (float(numb_elem) * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
return True
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if not trend or len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print(out)
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data steps')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
data.append([rows, trend[-1][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
data.append(trend[-1][1])
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
size = settings.get("size", "500,400").split(",")
title = cgi.escape(settings["title"].replace(" ", "")[:10])
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/vendors/flot/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/vendors/flot/jquery.flot.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/vendors/flot/jquery.flot.selection.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
'<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheigth)dpx"></div>' +
'<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate%(title)s(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime() - (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData%(title)s() {""" % \
{'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
'hheigth': int(size[1]) / 2, 'title': title}
if(len(trend) > 1):
granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
_to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
else:
granularity_td = datetime.timedelta()
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += ", [parseDate%s('%s'), %d]];\n" % (title,
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S')+ granularity_td,
trend[-1][1][col])
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """, [parseDate%s("%s"), %d]];
return [d1];
}
""" % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
granularity_td, trend[-1][1])
# Set options
tics = """yaxis: {
tickDecimals : 0
},"""
if settings["xtic_format"] != '':
current = _to_datetime(maxx, '%Y-%m-%d %H:%M:%S')
next = current + granularity_td
if (granularity_td.seconds + granularity_td.days * 24 * 3600) > 2592000:
next = current.replace(day=31)
tics += 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
% (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title, next)
out += """var options%s ={
series: {
lines: { steps: true, fill: true},
points: { show: false }
},
legend: {show: false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % (title, tics, )
# Write the plot method in javascript
out += """var startData%(title)s = getData%(title)s();
var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
// setup overview
var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
legend: { show: true, container: $("#overviewLegend%(title)s") },
series: {
lines: { steps: true, fill: true, lineWidth: 1},
shadowSize: 0
},
%(tics)s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % {"title": title, "tics": tics}
# Tooltip and zoom
out += """
function showTooltip%(title)s(x, y, contents) {
$('<div id="tooltip%(title)s">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint%(title)s = null;
$("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint%(title)s != item.datapoint) {
previousPoint%(title)s = item.datapoint;
$("#tooltip%(title)s").remove();
var y = item.datapoint[1];
showTooltip%(title)s(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip%(title)s").remove();
previousPoint%(title)s = null;
}
});
$("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
if (item) {
plot%(title)s.highlight(item.series, item.datapoint);
}
});
// now connect the two
$("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot%(title)s = $.plot($("#placeholder%(title)s"), getData%(title)s(ranges.xaxis.from, ranges.xaxis.to),
$.extend(true, {}, options%(title)s, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview%(title)s.setSelection(ranges, true);
});
$("#overview%(title)s").bind("plotselected", function (event, ranges) {
plot%(title)s.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>""" % {'title' : title}
open(path, 'w').write(out)
def get_numeric_stats(data, multiple):
""" Returns average, max and min values for data """
data = [x[1] for x in data]
if data == []:
return (0, 0, 0)
if multiple:
lists = []
for i in range(len(data[0])):
lists.append([x[i] for x in data])
return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
[min(x) for x in lists])
else:
return (float(sum(data)) / len(data), max(data), min(data))
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print(out)
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
def export_to_file(data, req):
"""
Exports the data to a file.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
try:
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
except ImportError:
csv_list = []
for row in data:
row = ['"%s"' % str(col) for col in row]
csv_list.append(",".join(row))
_export('text/csv', '\n'.join(csv_list), req)
# INTERNAL
def _export(mime, content, req):
"""
    Helper function to pass on the export call. Creates a
    temporary file in which the content is stored, then
    redirects to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format, acumulative=False):
"""
    Given a list of dates reflecting some sort of action/event, and some additional parameters,
    an internal data format is returned. Setting 'initial_value' to zero means that the
    frequency is not accumulated; each point only counts the actions within its own span.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in dt_format
@type t_start: str
@param t_end: End time for the time domain in dt_format
@type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' and 't_stop' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Work on a mutable copy; datetime.max is used below as a sentinel indicating we're done
action_dates = list(action_dates)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
vector = [(None, initial_value)]
try:
upcoming_action = action_dates.pop()
#Do not count null values (when year, month or day is 0)
if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
# Create an iterator running from the first day of activity
for current in _get_datetime_iter(t_start, granularity, dt_format):
        # Counter of action_dates in the current span, set the initial value to
        # zero to avoid accumulation.
if acumulative:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
if upcoming_action[0] == {"year": current.year,
"month": current.month,
"day": current.day,
"hour": current.hour,
"minute": current.minute,
"second": current.second
}[granularity]:
actions_here += upcoming_action[1]
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
# Make sure to stop the iteration at the end time
if {"year": current.year >= stop_at.year,
"month": current.month >= stop_at.month and current.year == stop_at.year,
"day": current.day >= stop_at.day and current.month == stop_at.month,
"hour": current.hour >= stop_at.hour and current.day == stop_at.day,
"minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
"second": current.second >= stop_at.second and current.minute == stop_at.minute
}[granularity]:
break
# Remove the first bogus tuple, and return
return vector[1:]
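# Sketch of the return format described above (timestamps and counts are hypothetical):
# for a three-day span with 'day' granularity the function returns something like
#   [('2009-01-01 00:00:00', 0), ('2009-01-02 00:00:00', 3), ('2009-01-03 00:00:00', 1)]
# i.e. one (timestamp, count) tuple per granularity step between t_start and t_end.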
def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
return_sql=False, sql_text='%s', acumulative=False):
"""
Returns the trend for the sql passed in the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = tuple([lower, upper] + extra_param)
if return_sql:
sql = sql % param
return sql_text % sql
return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative)
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' parameter
@type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
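# Illustrative use of the iterator above (hypothetical dates):
#   _get_datetime_iter('2012-01-30', granularity='day', dt_format='%Y-%m-%d')
# yields datetime(2012, 1, 30), datetime(2012, 1, 31), datetime(2012, 2, 1), ... indefinitely,
# so callers are expected to break out of the loop themselves (see _get_trend_from_actions).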
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
not found a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [(CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, "Cancelled"),
(CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, "Claimed"),
(CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, "In process"),
(CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, "Not arrived"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, "On loan"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, "On order"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, "On shelf")] + \
[(status, status) for status in CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
if not CFG_CERN_SITE or not "CERN" in dat[0]: # do not add internal libraries for CERN site
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
if res:
return res[0][0]
return ''
def _get_collection_recids_for_sql_query(coll):
ids = get_collection_reclist(coll).tolist()
if len(ids) == 0:
return ""
return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
def _check_udc_value_where():
return "id_bibrec IN (SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s) "
def _get_udc_truncated(udc):
if udc[-1] == '*':
return "%s%%" % udc[:-1]
if udc[0] == '*':
return "%%%s" % udc[1:]
return "%s" % udc
def _check_empty_value(value):
if len(value) == 0:
return ""
else:
return value[0][0]
def _get_granularity_sql_functions(granularity):
try:
return {
"year": ("YEAR",),
"month": ("YEAR", "MONTH",),
"day": ("MONTH", "DAY",),
"hour": ("DAY", "HOUR",),
"minute": ("HOUR", "MINUTE",),
"second": ("MINUTE", "SECOND")
}[granularity]
except KeyError:
return ("MONTH", "DAY",)
def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
extra_select="", dates_range_param="", group_by=True, count=True):
if len(dates_range_param) == 0:
dates_range_param = creation_time_name
conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
len(conditions) > 0 and "AND %s" % conditions or "")
values = {'creation_time_name': creation_time_name,
'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
'count': count and ", COUNT(*)" or "",
'tables_from': tables_from,
'conditions': conditions,
'extra_select': extra_select,
'group_by': ""}
if group_by:
values['group_by'] = "GROUP BY "
for fun in _get_granularity_sql_functions(granularity):
values['group_by'] += "%s(%s), " % (fun, creation_time_name)
values['group_by'] = values['group_by'][:-2]
return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
FROM %(tables_from)s WHERE %(conditions)s \
%(group_by)s \
ORDER BY %(creation_time_name)s DESC" % values
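# Rough sketch of the query built by _get_sql_query() above (exact whitespace differs);
# for _get_sql_query("creation_time", "day", "crcLOAN l") the result is approximately:
#   SELECT DAY(creation_time), COUNT(*) FROM crcLOAN l
#   WHERE creation_time > %s AND creation_time < %s
#   GROUP BY MONTH(creation_time), DAY(creation_time)
#   ORDER BY creation_time DESC
# with the two %s placeholders later bound to the lower/upper dates by run_sql().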
|
gpl-2.0
|
ltiao/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
218
|
3893
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
RomainBrault/scikit-learn
|
sklearn/__check_build/__init__.py
|
345
|
1671
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
|
bsd-3-clause
|
gchandru1/name-validation
|
validate_name.py
|
1
|
2272
|
import nltk
import itertools
import pandas
import sys
import unidecode
def is_valid_name(full_name):
#parsing individual names from words and ignoring tokens which are just ','
words = nltk.word_tokenize(full_name)
if ',' in words :
words.remove(',')
#Single letter word can only occur once.
one_letter_words = 0;
for word in words:
if (len(word) < 2 and word != ','):
one_letter_words = one_letter_words + 1
if (one_letter_words > 1 and len(words) == 2) :
#print "Name " + full_name + " cannot be just 2 initials or blanks"
return False
#checking for alphabets and white space
for word in words:
for ch in word:
if not ch == ',' :
if (not ((ord(ch) >= 65 and ord(ch) <= 90) or (ord(ch) >= 97 and ord(ch) <= 122) or ord(ch) == 32 or ord(ch) == 45 or ord(ch) == 46 or ord(ch) == 39)):
#print "Found random character " + ch + " in " + full_name
return False
        #if a word has any character repeated more than 3 consecutive times, it's not a valid name
corrected_word = ''.join(''.join(s)[:3] for _, s in itertools.groupby(word))
if corrected_word != word :
#print "Word " + word + " in name " + full_name + " corrected as " + corrected_word
return False
return True
path = 'https://raw.githubusercontent.com/gchandru1/name-validation/master/Datasets/'
df1 = pandas.read_csv(path + 'CSV_Database_of_First_Names.csv')
for name in df1[df1.columns[0]] :
name_ascii = unidecode.unidecode(name)
if isinstance(name_ascii, str) :
is_valid_name(name_ascii)
df2 = pandas.read_csv(path + 'CSV_Database_of_Last_Names.csv')
for name in df2[df2.columns[0]] :
name_ascii = unidecode.unidecode(name)
if isinstance(name_ascii, str) :
is_valid_name(name_ascii)
df3 = pandas.read_csv(path + 'chicago_employees.csv')
for name in df3[df3.columns[0]] :
name_ascii = unidecode.unidecode(name)
if isinstance(name_ascii, str) :
is_valid_name(name_ascii)
df4 = pandas.read_csv(path + 'fifa_players_2012.csv')
for name in df4[df4.columns[1]] :
name_ascii = unidecode.unidecode(name)
if isinstance(name_ascii, str) :
is_valid_name(name_ascii)
df5 = pandas.read_csv(path + 'olympicathletes.csv')
for name in df5[df5.columns[0]] :
name_ascii = unidecode.unidecode(name)
if isinstance(name_ascii, str) :
        is_valid_name(name_ascii)
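# Illustrative sanity check (added; not part of the original script): a few
# hand-picked, arbitrary names run through is_valid_name to show what the
# rules above accept and reject.
sample_names = ["Mary Smith", "J. K", "Jean-Luc Picard", "Aaaaaron Smith", "R2D2"]
for sample_name in sample_names:
    print((sample_name, is_valid_name(sample_name)))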
|
gpl-2.0
|
mhri/etri_perceptions
|
sensory_perception/perception_vision/perception_face/src/ColorDetector.py
|
1
|
2420
|
#!/usr/bin/env python
#-*- encoding: utf8 -*-
'''
Cloth Color Detector
Author: Minsu Jang ([email protected])
'''
import rospy
import cv2
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from sklearn.externals import joblib
from nolearn.dbn import DBN
class ColorDetector:
def __init__(self, input_shape, num_classes, model_file=None):
self.color_index = {}
self.color_index[0] = 'black'
self.color_index[1] = 'blue'
self.color_index[2] = 'brown'
self.color_index[3] = 'green'
self.color_index[4] = 'grey'
self.color_index[5] = 'orange'
self.color_index[6] = 'pink'
self.color_index[7] = 'purple'
self.color_index[8] = 'red'
self.color_index[9] = 'white'
self.color_index[10] = 'yellow'
self.colorNames = {}
self.colorNames = {v: k for k, v in self.color_index.items()}
self.model = self.create_model(input_shape, num_classes)
if model_file != None:
self.model.load_weights(model_file)
rospy.loginfo("ColorDetector Initialized.")
def extract_feature(self, img):
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
feature = []
hist = cv2.calcHist( [hsv], [0], None, [40], [0, 180] )
nf = hsv.shape[0]*hsv.shape[1]
#print "NF = ", hsv.shape, " & ", hsv.shape[0]
hist = hist.flatten() / nf
#print "SUM = ", sum(hist)
feature = np.concatenate([feature,hist])
hist = cv2.calcHist( [hsv], [1], None, [40], [0, 256] )
nf = hsv.shape[0]*hsv.shape[1]
hist = hist.flatten() / nf
feature = np.concatenate([feature,hist])
hist = cv2.calcHist( [hsv], [2], None, [40], [0, 256] )
nf = hsv.shape[0]*hsv.shape[1]
hist = hist.flatten() / nf
feature = np.concatenate([feature,hist])
np.set_printoptions(precision=4,suppress=True)
return feature
def create_model(self, input_shape, num_classes):
model = Sequential()
model.add(Dense(300, activation='sigmoid', input_shape=(input_shape,)))
#model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(300, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
return model
def classify_color(self, img):
feature = self.extract_feature(img)
x = np.array([feature])
predictions = self.model.predict(x)
        # pick the first class whose probability exceeds 0.5; fall back to the
        # most likely class if none does, instead of raising an IndexError
        above_threshold = np.where(predictions[0] > 0.5)[0]
        index = above_threshold[0] if len(above_threshold) else np.argmax(predictions[0])
return self.color_index[index]
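# Illustrative usage sketch (added; not part of the original module).
# extract_feature() builds three 40-bin HSV histograms, so input_shape=120 and
# num_classes=11 match the color_index table above. The image path and the
# weights file below are placeholders; in practice a trained Keras weight file
# is passed as model_file, and a ROS environment (rospy) is assumed.
if __name__ == '__main__':
    detector = ColorDetector(input_shape=120, num_classes=11, model_file=None)
    patch = cv2.imread('cloth_patch.png')  # placeholder: any BGR image crop
    if patch is not None:
        print(detector.classify_color(patch))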
|
gpl-2.0
|
andaag/scikit-learn
|
sklearn/feature_selection/tests/test_chi2.py
|
221
|
2398
|
"""
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
|
bsd-3-clause
|
Silmathoron/nest-simulator
|
pynest/examples/correlospinmatrix_detector_two_neuron.py
|
12
|
2587
|
# -*- coding: utf-8 -*-
#
# correlospinmatrix_detector_two_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Correlospinmatrix detector example
----------------------------------------
This script simulates two connected binary neurons, similar to the setup
in [1]_. It measures and plots the auto- and cross-covariance functions
of the individual neurons and between them, respectively.
References
~~~~~~~~~~~~
.. [1] Ginzburg and Sompolinsky (1994). Theory of correlations in stochastic neural networks. 50(4) p. 3175. Fig. 1.
"""
import matplotlib.pyplot as plt
import nest
import numpy as np
m_x = 0.5
tau_m = 10.
h = 0.1
T = 1000000.
tau_max = 100.
csd = nest.Create("correlospinmatrix_detector")
csd.set(N_channels=2, tau_max=tau_max, Tstart=tau_max, delta_tau=h)
nest.SetDefaults('ginzburg_neuron', {'theta': 0.0, 'tau_m': tau_m,
'c_1': 0.0, 'c_2': 2. * m_x, 'c_3': 1.0})
n1 = nest.Create("ginzburg_neuron")
nest.SetDefaults("mcculloch_pitts_neuron", {'theta': 0.5, 'tau_m': tau_m})
n2 = nest.Create("mcculloch_pitts_neuron")
nest.Connect(n1, n2, syn_spec={"weight": 1.0})
nest.Connect(n1, csd, syn_spec={"receptor_type": 0})
nest.Connect(n2, csd, syn_spec={"receptor_type": 1})
nest.Simulate(T)
c = csd.get("count_covariance")
m = np.zeros(2, dtype=float)
for i in range(2):
m[i] = c[i][i][int(tau_max / h)] * (h / T)
print('mean activities =', m)
cmat = np.zeros((2, 2, int(2 * tau_max / h) + 1), dtype=float)
for i in range(2):
for j in range(2):
cmat[i, j] = c[i][j] * (h / T) - m[i] * m[j]
ts = np.arange(-tau_max, tau_max + h, h)
plt.title("auto- and cross covariance functions")
plt.plot(ts, cmat[0, 1], 'r', label=r"$c_{12}$")
plt.plot(ts, cmat[1, 0], 'b', label=r"$c_{21}$")
plt.plot(ts, cmat[0, 0], 'g', label=r"$c_{11}$")
plt.plot(ts, cmat[1, 1], 'y', label=r"$c_{22}$")
plt.xlabel(r"time $t \; \mathrm{ms}$")
plt.ylabel(r"$c$")
plt.legend()
plt.show()
|
gpl-2.0
|
rgommers/statsmodels
|
statsmodels/tsa/base/datetools.py
|
1
|
10627
|
from statsmodels.compat.python import (lrange, lzip, lmap, string_types, callable,
asstr, reduce, zip, map)
import re
import datetime
from pandas import Period
from pandas.tseries.frequencies import to_offset
from pandas import datetools as pandas_datetools
import numpy as np
#NOTE: All of these frequencies assume end of period (except wrt time)
class _freq_to_pandas_class(object):
# being lazy, don't want to replace dictionary below
def __getitem__(self, key):
return to_offset(key)
_freq_to_pandas = _freq_to_pandas_class()
def _is_datetime_index(dates):
if isinstance(dates[0], (datetime.datetime, Period)):
return True # TimeStamp is a datetime subclass
else:
return False
def _index_date(date, dates):
"""
Gets the index number of a date in a date index.
    Works in-sample and will also return one index past the end of the dates,
    since prediction can start one step beyond the sample.
    Currently used to validate prediction start dates.
    If the dates are not of a fixed frequency and the given date is not in
    the existing dates, then a ValueError is raised.
"""
if isinstance(date, string_types):
date = date_parser(date)
try:
if hasattr(dates, 'indexMap'): # 0.7.x
return dates.indexMap[date]
else:
date = dates.get_loc(date)
try: # pandas 0.8.0 returns a boolean array
len(date)
return np.where(date)[0].item()
except TypeError: # expected behavior
return date
except KeyError as err:
freq = _infer_freq(dates)
if freq is None:
#TODO: try to intelligently roll forward onto a date in the
# index. Waiting to drop pandas 0.7.x support so this is
# cleaner to do.
raise ValueError("There is no frequency for these dates and "
"date %s is not in dates index. Try giving a "
"date that is in the dates index or use "
"an integer" % date)
# we can start prediction at the end of endog
if _idx_from_dates(dates[-1], date, freq) == 1:
return len(dates)
raise ValueError("date %s not in date index. Try giving a "
"date that is in the dates index or use an integer"
% date)
def _date_from_idx(d1, idx, freq):
"""
Returns the date from an index beyond the end of a date series.
d1 is the datetime of the last date in the series. idx is the
index distance of how far the next date should be from d1. Ie., 1 gives
the next date from d1 at freq.
Notes
-----
This does not do any rounding to make sure that d1 is actually on the
offset. For now, this needs to be taken care of before you get here.
"""
return d1 + idx * _freq_to_pandas[freq]
def _idx_from_dates(d1, d2, freq):
"""
Returns an index offset from datetimes d1 and d2. d1 is expected to be the
last date in a date series and d2 is the out of sample date.
Notes
-----
Rounds down the index if the end date is before the next date at freq.
    Does not check the start date to see whether it is on the offset but
assumes that it is.
"""
from pandas import DatetimeIndex
return len(DatetimeIndex(start=d1, end=d2,
freq = _freq_to_pandas[freq])) - 1
_quarter_to_day = {
"1" : (3, 31),
"2" : (6, 30),
"3" : (9, 30),
"4" : (12, 31),
"I" : (3, 31),
"II" : (6, 30),
"III" : (9, 30),
"IV" : (12, 31)
}
_mdays = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_months_with_days = lzip(lrange(1,13), _mdays)
_month_to_day = dict(zip(map(str,lrange(1,13)), _months_with_days))
_month_to_day.update(dict(zip(["I", "II", "III", "IV", "V", "VI",
"VII", "VIII", "IX", "X", "XI", "XII"],
_months_with_days)))
# regex patterns
_y_pattern = '^\d?\d?\d?\d$'
_q_pattern = '''
^ # beginning of string
\d?\d?\d?\d # match any number 1-9999, includes leading zeros
(:?q) # use q or a : as a separator
([1-4]|(I{1,3}V?)) # match 1-4 or I-IV roman numerals
$ # end of string
'''
_m_pattern = '''
^ # beginning of string
\d?\d?\d?\d # match any number 1-9999, includes leading zeros
(:?m) # use m or a : as a separator
(([1-9][0-2]?)|(I?XI{0,2}|I?VI{0,3}|I{1,3})) # match 1-12 or
# I-XII roman numerals
$ # end of string
'''
#NOTE: see also ts.extras.isleapyear, which accepts a sequence
def _is_leap(year):
year = int(year)
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def date_parser(timestr, parserinfo=None, **kwargs):
"""
Uses dateutil.parser.parse, but also handles monthly dates of the form
1999m4, 1999:m4, 1999:mIV, 1999mIV and the same for quarterly data
with q instead of m. It is not case sensitive. The default for annual
data is the end of the year, which also differs from dateutil.
"""
flags = re.IGNORECASE | re.VERBOSE
if re.search(_q_pattern, timestr, flags):
y,q = timestr.replace(":","").lower().split('q')
month, day = _quarter_to_day[q.upper()]
year = int(y)
elif re.search(_m_pattern, timestr, flags):
y,m = timestr.replace(":","").lower().split('m')
month, day = _month_to_day[m.upper()]
year = int(y)
if _is_leap(y) and month == 2:
day += 1
elif re.search(_y_pattern, timestr, flags):
month, day = 12, 31
year = int(timestr)
else:
if (hasattr(pandas_datetools, 'parser') and
not callable(pandas_datetools.parser)):
# exists in 0.8.0 pandas, but it's the class not the module
return pandas_datetools.parser.parse(timestr, parserinfo,
**kwargs)
else: # 0.8.1 pandas version didn't import this into namespace
from dateutil import parser
return parser.parse(timestr, parserinfo, **kwargs)
return datetime.datetime(year, month, day)
def date_range_str(start, end=None, length=None):
"""
Returns a list of abbreviated date strings.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
        The length of the returned array if end is None.
Returns
-------
date_range : list
List of strings
"""
flags = re.IGNORECASE | re.VERBOSE
#_check_range_inputs(end, length, freq)
start = start.lower()
if re.search(_m_pattern, start, flags):
annual_freq = 12
split = 'm'
elif re.search(_q_pattern, start, flags):
annual_freq = 4
split = 'q'
elif re.search(_y_pattern, start, flags):
annual_freq = 1
start += 'a1' # hack
if end:
end += 'a1'
split = 'a'
else:
raise ValueError("Date %s not understood" % start)
yr1, offset1 = lmap(int, start.replace(":","").split(split))
if end is not None:
end = end.lower()
yr2, offset2 = lmap(int, end.replace(":","").split(split))
length = (yr2 - yr1) * annual_freq + offset2
elif length:
yr2 = yr1 + length // annual_freq
offset2 = length % annual_freq + (offset1 - 1)
years = np.repeat(lrange(yr1+1, yr2), annual_freq).tolist()
years = np.r_[[str(yr1)]*(annual_freq+1-offset1), years] # tack on first year
years = np.r_[years, [str(yr2)]*offset2] # tack on last year
if split != 'a':
offset = np.tile(np.arange(1, annual_freq+1), yr2-yr1-1)
offset = np.r_[np.arange(offset1, annual_freq+1).astype('a2'), offset]
offset = np.r_[offset, np.arange(1,offset2+1).astype('a2')]
date_arr_range = [''.join([i, split, asstr(j)]) for i,j in
zip(years, offset)]
else:
date_arr_range = years.tolist()
return date_arr_range
def dates_from_str(dates):
"""
    Takes a sequence of abbreviated date strings and returns a list of datetimes.
Parameters
----------
dates : array-like
A sequence of abbreviated dates as string. For instance,
'1996m1' or '1996Q1'. The datetime dates are at the end of the
period.
Returns
-------
date_list : array
A list of datetime types.
"""
return lmap(date_parser, dates)
def dates_from_range(start, end=None, length=None):
"""
    Turns a pair of abbreviated date strings (or a start date and a length) into a list of datetimes.
Parameters
----------
start : str
The first abbreviated date, for instance, '1965q1' or '1965m1'
end : str, optional
The last abbreviated date if length is None.
length : int, optional
        The length of the returned array if end is None.
Example
-------
>>> import statsmodels.api as sm
    >>> dates = sm.tsa.datetools.dates_from_range('1960m1', length=nobs)
Returns
-------
date_list : array
A list of datetime types.
"""
dates = date_range_str(start, end, length)
return dates_from_str(dates)
def _add_datetimes(dates):
return reduce(lambda x, y: y+x, dates)
def _infer_freq(dates):
maybe_freqstr = getattr(dates, 'freqstr', None)
if maybe_freqstr is not None:
return maybe_freqstr
try:
from pandas.tseries.api import infer_freq
freq = infer_freq(dates)
return freq
except ImportError:
pass
timedelta = datetime.timedelta
nobs = min(len(dates), 6)
if nobs == 1:
raise ValueError("Cannot infer frequency from one date")
if hasattr(dates, 'values'):
dates = dates.values # can't do a diff on a DateIndex
diff = np.diff(dates[:nobs])
delta = _add_datetimes(diff)
nobs -= 1 # after diff
if delta == timedelta(nobs): #greedily assume 'D'
return 'D'
elif delta == timedelta(nobs + 2):
return 'B'
elif delta == timedelta(7*nobs):
return 'W'
elif delta >= timedelta(28*nobs) and delta <= timedelta(31*nobs):
return 'M'
elif delta >= timedelta(90*nobs) and delta <= timedelta(92*nobs):
return 'Q'
elif delta >= timedelta(365 * nobs) and delta <= timedelta(366 * nobs):
return 'A'
else:
return
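# Illustrative examples (added; not part of the original module): a few
# hand-checked inputs showing the abbreviated formats date_parser()
# understands. Guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    assert date_parser("1999q2") == datetime.datetime(1999, 6, 30)
    assert date_parser("1999:mIV") == datetime.datetime(1999, 4, 30)
    assert date_parser("1999") == datetime.datetime(1999, 12, 31)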
|
bsd-3-clause
|
toolgirl/irish-folk-motifs
|
src/abc_to_numbers.py
|
1
|
4073
|
import numpy as np
import pandas as pd
from collections import Counter
possible_keys = {
'A': 'Amajor',
'Am': 'Aminor',
'Ador': 'Adorian',
'ADor': 'Adorian',
'Amix': 'Amixolydian',
'AMix': 'Amixolydian',
'Aphr': 'Aphrygian',
'Bb': 'Bbmajor',
'Bn': 'Bmajor',
'Bm': 'Bminor',
'Bdor': 'Bdorian',
'Bmix': 'Bmixolydian',
'Bphr': 'Bphrygian',
'C': 'Cmajor',
'Cm': 'Cminor',
'Cdor': 'Cdorian',
'D': 'Dmajor',
'Dm': 'Dminor',
'Ddor': 'Ddorian',
'DDor': 'Ddorian',
'Dmix': 'Dmixolydian',
'DMix': 'Dmixolydian',
'Dmixm': 'Dmixolydian',
'Dphr': 'Dphrygian',
'Dlyd': 'Dlydian',
'Eb': 'Ebmajor',
'E': 'Emajor',
'Em': 'Eminor',
'Edor': 'Edorian',
'Emix': 'Emixolydian',
'F': 'Fmajor',
'Fdor': 'Fdorian',
'F#m': 'F#m',
'Fmix': 'Fmixolydian',
'G': 'Gmajor',
'Gm': 'Gminor',
'Gdor': 'Gdorian',
'GDor': 'Gdorian',
'Gmix': 'Gmixolydian',
'Glyd': 'Glydian'
}
actual_keys = ['B', 'E', 'A', 'D', 'G', 'C', 'F', 'Bb', 'Eb']
sharps_flats = {'B': ['C#', 'D#', 'F#', 'G#', 'A#'],
'E': ['C#', 'D#','F#','G#'],
'A': ['C#', 'F#', 'G#'],
'D': ['C#', 'F#'],
                'G': ['F#'],
'C': [None],
'F': ['Bb'],
'Bb': ['Bb', 'Eb'],
'Eb': ['Bb', 'Eb', 'Ab']
}
def abc_to_basekeys(df):
base_keys = {
'Amajor': 'A',
'Aminor': 'C',
'Amixolydian': 'D',
'Adorian': 'G',
'Aphrygian': 'F',
'Bbmajor': 'Bb',
'Bmajor': 'B',
'Bminor': 'D',
'Bmixolydian': 'E',
'Bdorian': 'A',
'Bphrygian': 'G',
'Cmajor': 'C',
'Cminor': 'Eb',
'Cdorian': 'Bb',
'Dmajor': 'D',
'Dminor': 'F',
'Dmixolydian': 'G',
'Ddorian': 'C',
'Dphrygian': 'Bb',
'Dlydian': 'A',
'Ebmajor': 'Eb',
'Emajor': 'E',
'Eminor': 'G',
'Emixolydian': 'A',
'Edorian': 'D',
'Fmajor': 'F',
'Fmixolydian': 'Bb',
'Fdorian': 'Eb',
'F#m': 'A',
'Gmajor': 'G',
'Gminor': 'Bb',
'Gmixolydian': 'C',
'Gdorian': 'F',
'Glydian': 'D'
}
    # map each full key/mode name onto its relative-major base key and return it
    df = df.replace({'mode': base_keys})
    # df = pd.DataFrame(0, index=possible_keys)  # leftover scratch line: would discard the mapped frame
    return df
import re
abc = """
E|A2E Ace|ede ABA|=G2D G>Bc|dcd =G2B|A2E ABd|e2f =gfg|edc Bcd|ecA A2:|
|:a|aga A2a|aga A2=g|=gfg =G2g|=gfg =G2B|c2c d2d|e2f =gfg|edc Bcd|ecA A2:|
"""
abc = "|=G2D G>Bc|dcd =G2B|A2E ABd|e2f =gfg|"
accidentals = {
"A": '',
"B": '',
"C": '',
"D": '',
"E": '',
"F": '',
"G": ''
}
pattern = r"[\^=_]?[a-gA-G][,\']?\d?"
offset = 0
while offset < len(abc):
if abc[offset] == '|':
for note in accidentals:
accidentals[note] = ''
match = re.match(pattern, abc[offset:])
if not match:
offset += 1
continue
note_str = match.group(0)
if note_str[0] in ('^', '=', '_'):
uppercase_note = note_str[1].upper()
accidentals[uppercase_note] = note_str[0]
else:
uppercase_note = note_str[0].upper()
note_str = accidentals[uppercase_note] + note_str
print(note_str)
offset += len(note_str)
|
gpl-3.0
|
krez13/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
264
|
1804
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
jakevdp/JSAnimation
|
JSAnimation/IPython_display.py
|
4
|
3030
|
from .html_writer import HTMLWriter
from matplotlib.animation import Animation
import matplotlib.pyplot as plt
import tempfile
import random
import os
__all__ = ['anim_to_html', 'display_animation']
class _NameOnlyTemporaryFile(object):
"""A context-managed temporary file which is not opened.
The file should be accessible by name on any system.
Parameters
----------
suffix : string
The suffix of the temporary file (default = '')
prefix : string
The prefix of the temporary file (default = '_tmp_')
    hash_length : integer
The length of the random hash. The size of the hash space will
be 16 ** hash_length (default=8)
seed : integer
the seed for the random number generator. If not specified, the
system time will be used as a seed.
absolute : boolean
If true, return an absolute path to a temporary file in the current
working directory.
Example
-------
>>> with _NameOnlyTemporaryFile(seed=0, absolute=False) as f:
... print(f)
...
_tmp_d82c07cd
>>> os.path.exists('_tmp_d82c07cd') # file removed after context
False
"""
def __init__(self, prefix='_tmp_', suffix='', hash_length=8,
seed=None, absolute=True):
rng = random.Random(seed)
self.name = '%s%0*x%s' % (prefix, hash_length,
rng.getrandbits(4 * hash_length), suffix)
if absolute:
self.name = os.path.abspath(self.name)
def __enter__(self):
return self
def __exit__(self, *exc_info):
if os.path.exists(self.name):
os.remove(self.name)
def anim_to_html(anim, fps=None, embed_frames=True, default_mode='loop'):
"""Generate HTML representation of the animation"""
if fps is None and hasattr(anim, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / anim._interval
plt.close(anim._fig)
if hasattr(anim, "_html_representation"):
return anim._html_representation
else:
# tempfile can't be used here: we need a filename, and this
# fails on windows. Instead, we use a custom filename generator
#with tempfile.NamedTemporaryFile(suffix='.html') as f:
with _NameOnlyTemporaryFile(suffix='.html') as f:
anim.save(f.name, writer=HTMLWriter(fps=fps,
embed_frames=embed_frames,
default_mode=default_mode))
html = open(f.name).read()
anim._html_representation = html
return html
def display_animation(anim, **kwargs):
"""Display the animation with an IPython HTML object"""
from IPython.display import HTML
return HTML(anim_to_html(anim, **kwargs))
# This is the magic that makes animations display automatically in the
# IPython notebook. The _repr_html_ method is a special method recognized
# by IPython.
Animation._repr_html_ = anim_to_html
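# Illustrative usage sketch (added; not part of the original module): build a
# trivial FuncAnimation and print the start of the generated HTML. Assumes a
# matplotlib version contemporary with this package; inside an IPython
# notebook, display_animation(anim) shows the same HTML inline.
if __name__ == '__main__':
    import numpy as np
    from matplotlib import animation
    fig, ax = plt.subplots()
    x = np.linspace(0, 2 * np.pi, 100)
    line, = ax.plot(x, np.sin(x))
    def update(i):
        line.set_ydata(np.sin(x + 0.1 * i))
        return line,
    anim = animation.FuncAnimation(fig, update, frames=20, interval=50)
    print(anim_to_html(anim)[:80])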
|
bsd-2-clause
|
PennyQ/astro-vispy
|
glue_vispy_viewers/volume/layer_artist.py
|
3
|
8246
|
import uuid
import weakref
from matplotlib.colors import ColorConverter
from glue.core.data import Subset, Data
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import broadcast_to
from glue.core.fixed_resolution_buffer import ARRAY_CACHE, PIXEL_CACHE
from .colors import get_translucent_cmap
from .layer_state import VolumeLayerState
from ..common.layer_artist import VispyLayerArtist
class DataProxy(object):
def __init__(self, viewer_state, layer_artist):
self._viewer_state = weakref.ref(viewer_state)
self._layer_artist = weakref.ref(layer_artist)
@property
def layer_artist(self):
return self._layer_artist()
@property
def viewer_state(self):
return self._viewer_state()
@property
def shape(self):
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
z_axis = self.viewer_state.z_att.axis
if isinstance(self.layer_artist.layer, Subset):
full_shape = self.layer_artist.layer.data.shape
else:
full_shape = self.layer_artist.layer.shape
return full_shape[z_axis], full_shape[y_axis], full_shape[x_axis]
def compute_fixed_resolution_buffer(self, bounds=None):
shape = [bound[2] for bound in bounds]
if self.layer_artist is None or self.viewer_state is None:
return broadcast_to(0, shape)
if isinstance(self.layer_artist.layer, Subset):
try:
subset_state = self.layer_artist.layer.subset_state
result = self.layer_artist.layer.data.compute_fixed_resolution_buffer(
target_data=self.layer_artist._viewer_state.reference_data,
bounds=bounds, subset_state=subset_state,
cache_id=self.layer_artist.id)
except IncompatibleAttribute:
self.layer_artist.disable_incompatible_subset()
return broadcast_to(0, shape)
else:
self.layer_artist.enable()
else:
try:
result = self.layer_artist.layer.compute_fixed_resolution_buffer(
target_data=self.layer_artist._viewer_state.reference_data,
bounds=bounds, target_cid=self.layer_artist.state.attribute,
cache_id=self.layer_artist.id)
except IncompatibleAttribute:
self.layer_artist.disable('Layer data is not fully linked to reference data')
return broadcast_to(0, shape)
else:
self.layer_artist.enable()
return result
class VolumeLayerArtist(VispyLayerArtist):
"""
A layer artist to render volumes.
This is more complex than for other visual types, because for volumes, we
need to manage all the volumes via a single MultiVolume visual class for
each data viewer.
"""
def __init__(self, vispy_viewer=None, layer=None, layer_state=None):
super(VolumeLayerArtist, self).__init__(layer)
self._clip_limits = None
self.layer = layer or layer_state.layer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or VolumeLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# We create a unique ID for this layer artist, that will be used to
# refer to the layer artist in the MultiVolume. We have to do this
# rather than use self.id because we can't guarantee the latter is
# unique.
self.id = str(uuid.uuid4())
self._multivol = self.vispy_widget._multivol
self._multivol.allocate(self.id)
self._viewer_state.add_global_callback(self._update_volume)
self.state.add_global_callback(self._update_volume)
self.reset_cache()
self._data_proxy = None
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@property
def visual(self):
return self._multivol
@property
def bbox(self):
return (-0.5, self.layer.shape[2] - 0.5,
-0.5, self.layer.shape[1] - 0.5,
-0.5, self.layer.shape[0] - 0.5)
@property
def shape(self):
return self.layer.shape
def redraw(self):
"""
Redraw the Vispy canvas
"""
self.vispy_widget.canvas.update()
def clear(self):
"""
Remove the layer artist from the visualization
"""
# We don't want to deallocate here because this can be called if we
# disable the layer due to incompatible attributes
self._multivol.disable(self.id)
def remove(self):
"""
Remove the layer artist for good
"""
self._multivol.deallocate(self.id)
ARRAY_CACHE.pop(self.id, None)
PIXEL_CACHE.pop(self.id, None)
def _update_cmap_from_color(self):
cmap = get_translucent_cmap(*ColorConverter().to_rgb(self.state.color))
self._multivol.set_cmap(self.id, cmap)
self.redraw()
def _update_limits(self):
if isinstance(self.layer, Subset):
self._multivol.set_clim(self.id, None)
else:
self._multivol.set_clim(self.id, (self.state.vmin, self.state.vmax))
self.redraw()
def _update_alpha(self):
self._multivol.set_weight(self.id, self.state.alpha)
self.redraw()
def _update_subset_mode(self):
if isinstance(self.state.layer, Data) or self.state.subset_mode == 'outline':
self._multivol.set_multiply(self.id, None)
else:
label = self._multivol.label_for_layer(self.state.layer.data)
self._multivol.set_multiply(self.id, label)
self.redraw()
def _update_data(self):
if self._data_proxy is None:
self._data_proxy = DataProxy(self._viewer_state, self)
self._multivol.set_data(self.id, self._data_proxy, layer=self.layer)
else:
self._multivol._update_scaled_data(self.id)
self._update_subset_mode()
def _update_visibility(self):
if self.state.visible:
self._multivol.enable(self.id)
else:
self._multivol.disable(self.id)
self.redraw()
def set_clip(self, limits):
pass
def _update_volume(self, force=False, **kwargs):
if self.state.attribute is None or self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed due to x_att changing then hist_x_min, hist_x_max, etc.
# If we can solve this so that _update_histogram is really only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or 'color' in changed:
self._update_cmap_from_color()
if force or 'vmin' in changed or 'vmax' in changed:
self._update_limits()
if force or 'alpha' in changed:
self._update_alpha()
if force or 'layer' in changed or 'attribute' in changed:
self._update_data()
if force or 'subset_mode' in changed:
self._update_subset_mode()
if force or 'visible' in changed:
self._update_visibility()
def update(self):
self._update_volume(force=True)
self.redraw()
|
bsd-2-clause
|
RoyBoy432/Emergence-Senescence
|
figure_code/MacroecologyPatterns/SAR.py
|
8
|
1591
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.stats.kde import gaussian_kde
import sys
mydir = os.path.expanduser('~/GitHub/Emergence')
tools = os.path.expanduser(mydir + "/tools")
data = mydir + '/results/simulated_data/SAR-Data.csv'
def get_kdens_choose_kernel(_list,kernel):
""" Finds the kernel density function across a sample of SADs """
density = gaussian_kde(_list)
n = len(_list)
xs = np.linspace(0, 1, n)
density.covariance_factor = lambda : kernel
density._compute_covariance()
D = [xs,density(xs)]
return D
z_nest = []
z_rand = []
with open(data) as f:
for d in f:
d = list(eval(d))
sim = d.pop(0)
ct = d.pop(0)
if ct > 100:
z1, z2 = d
z_nest.append(z1)
z_rand.append(z2)
fs = 14
fig = plt.figure(figsize=(3, 2))
fig.add_subplot(1, 1, 1)
kernel = 0.1
D = get_kdens_choose_kernel(z_nest, kernel)
plt.plot(D[0],D[1],color = 'k', lw=3, alpha = 0.99, label= 'Nested SAR '+'$z$'+'-values')
D = get_kdens_choose_kernel(z_rand, kernel)
plt.plot(D[0],D[1],color = '0.5', lw=3, alpha = 0.99, label= 'R.A. SAR '+'$z$'+'-values')
plt.legend(loc='best', fontsize=fs-5, frameon=False)
plt.xlabel('$z$', fontsize=fs+6)
plt.ylabel('$density$', fontsize=fs+3)
plt.tick_params(axis='both', labelsize=fs-3)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/SAR.png', dpi=200, bbox_inches = "tight")
plt.close()
|
gpl-3.0
|
ryfeus/lambda-packs
|
Tensorflow/source/tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py
|
92
|
4535
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
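# Illustrative check (added; not part of the original module), assuming pandas
# is installed: int, float and bool columns are accepted by
# extract_pandas_data and cast to float64; any other dtype raises ValueError.
if __name__ == '__main__':
  _df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': [True, False]})
  print(extract_pandas_data(_df).dtype)  # float64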
|
mit
|
johnbachman/pysb
|
pysb/examples/run_earm_hpp.py
|
5
|
2377
|
""" Run the Extrinsic Apoptosis Reaction Model (EARM) using BioNetGen's
Hybrid-Particle Population (HPP) algorithm.
NFsim provides stochastic simulation without reaction network generation,
allowing simulation of models with large (or infinite) reaction networks by
keeping track of species counts. However, it can fail when the number of
instances of a species gets too large (typically >200000). HPP circumvents
this problem by allowing the user to define species with large instance
counts as populations rather than NFsim particles.
This example runs the EARM 1.0 model with HPP, which fails to run on NFsim
with the default settings due to large initial concentration counts of
several species. By assigning population maps to these species, we can run
the simulation.
Reference: Hogg et al. PLoS Comput Biol 2014
https://doi.org/10.1371/journal.pcbi.1003544
"""
from pysb.examples.earm_1_0 import model
from pysb.simulator import BngSimulator
from pysb.simulator.bng import PopulationMap
from pysb import Parameter
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_min_max(name, title=None):
x = np.array([tr[:][name] for tr in trajectories]).T
if not title:
title = name
plt.figure(title)
plt.plot(tout.T, x, '0.5', lw=2, alpha=0.25) # individual trajectories
plt.plot(tout[0], x.mean(1), 'k--', lw=3, label="Mean")
plt.plot(tout[0], x.min(1), 'b--', lw=3, label="Minimum")
plt.plot(tout[0], x.max(1), 'r--', lw=3, label="Maximum")
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Population of %s' % name)
PARP, CPARP, Mito, mCytoC = [model.monomers[x] for x in
['PARP', 'CPARP', 'Mito', 'mCytoC']]
klump = Parameter('klump', 10000, _export=False)
model.add_component(klump)
population_maps = [
PopulationMap(PARP(b=None), klump),
PopulationMap(CPARP(b=None), klump),
PopulationMap(Mito(b=None), klump),
PopulationMap(mCytoC(b=None), klump)
]
sim = BngSimulator(model, tspan=np.linspace(0, 20000, 101))
simres = sim.run(n_runs=20, method='nf', population_maps=population_maps)
trajectories = simres.all
tout = simres.tout
plot_mean_min_max('Bid_unbound')
plot_mean_min_max('PARP_unbound')
plot_mean_min_max('mSmac_unbound')
plot_mean_min_max('tBid_total')
plot_mean_min_max('CPARP_total')
plot_mean_min_max('cSmac_total')
plt.show()
|
bsd-2-clause
|
binhqnguyen/lena-local
|
src/flow-monitor/examples/wifi-olsr-flowmon.py
|
108
|
7439
|
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
gpl-2.0
|
mkness/TheCannon
|
code/makeplot_chi_general2.py
|
1
|
3235
|
#!/usr/bin/python
# consolidated imports: the original block repeated several matplotlib imports
# and used rcParams, plt, np and a few bare numpy names without importing them
import numpy
import numpy as np
from numpy import savetxt, unique, arange, array, argsort
import scipy
from scipy import interpolate
import matplotlib
from matplotlib import pyplot
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# global font, tick-label and figure settings
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
rc('text', usetex=False)
rc('font', family='serif')
rcParams["xtick.labelsize"] = 14
rcParams["ytick.labelsize"] = 14
rcParams['figure.figsize'] = 15.0, 10.0
# tick locators/formatters referenced by the plotting code
majorLocator = MultipleLocator(5)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
yminorLocator2 = MultipleLocator(25)
xminorLocator = MultipleLocator(5)
yminorLocator = MultipleLocator(5)
ymajorLocator = MultipleLocator(50)
xmajorLocator = MultipleLocator(10)
a = open('starsin_new_all_ordered.txt', 'r' )
al = a.readlines()
names = []
for each in al:
names.append(each.split()[1])
unames = unique(names)
starind = arange(0,len(names), 1)
name_ind = []
names = array(names)
for each in unames:
takeit = each == names
name_ind.append(starind[takeit][-1]+1. )
#x, median_y, t_y, g_y,feh_y,chi_y = loadtxt('data_test.txt', usecols = (0,1,2,3,4,5), unpack =1)
#fig1 = pyplot.figure()
#ax0 = fig1.add_subplot(111)
fig, ax = plt.subplots()
sortindx = 2
sortname = ["Teff", "logg", "Fe/H"]
index_use = argsort(metaall[:,sortindx])
ax.set_title("Per-pixel scaled residuals ($\chi$); spectra ordered by cluster ordered by [Fe/H] ")# %s" % (sortname[sortindx]),fontsize = 20 )
#ax.set_xlabel("Wavelength, $\AA$",fontsize = 20,labelpad = 10 )
ax.set_xlabel("Wavelength-direction pixel number ",fontsize = 20,labelpad = 10 )
ax.set_ylabel("Star Number",fontsize = 20)
print "Ordered by %s" % (sortname[sortindx])
# run makeplot_coeff_general first; the dataall, metaall and chis arrays used below are expected to come from it
wl = dataall[:,0,0]
image = np.arcsinh(chis)
image2 = np.insert(image, name_ind, values=-10, axis =1)
#test = ax.imshow(image[:,index_use].T, cmap=plt.cm.RdGy, interpolation="nearest", vmin = -5, vmax = 5 ,aspect = 'auto',origin = 'lower', extent = (wl.min(), wl.max(), 0, len(image.T)))
test = ax.imshow(image2.T, cmap=plt.cm.RdBu, interpolation="nearest", vmin = -2, vmax = 2 ,aspect = 'auto',origin = 'lower', extent = (wl.min(), wl.max(), 0, len(image.T)))
#test = ax.imshow(image2.T, cmap=plt.cm.Set1, interpolation="nearest", vmin = -5, vmax = 5 ,aspect = 'auto',origin = 'lower', extent = (0, len(wl), 0, len(image.T)))
#test = ax.imshow(image[:,index_use].T, cmap=plt.cm.RdGy, interpolation="nearest", vmin = -5, vmax = 5 ,aspect = 'auto',origin = 'lower', extent = (wl.min(), wl.max(), 0, len(image.T)))
cb = fig.colorbar(test)
cb.set_label("arcsinh($\chi$)", fontsize = 20 )
fig.savefig('/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/chi_map.eps', transparent=True, bbox_inches='tight', pad_inches=0)
|
mit
|
calliope-project/calliope
|
calliope/postprocess/plotting/flows.py
|
1
|
13226
|
"""
Copyright (C) 2013-2019 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
flows.py
~~~~~~~~~~~~~~~
Plot energy flows data.
"""
import pandas as pd
from calliope.postprocess.plotting.util import break_name, get_range
def _line(
node_coordinates,
transmission_type,
to_node,
from_node,
carrier,
tech,
prod,
scale_factor,
techs_colors,
is_initial_timestep,
add_legend,
name,
):
# e.g. "Region1->Region2: 256.54 by gas_transmission (gas)"
hover_info = "%s->%s: %.2f by %s (%s)" % (
from_node,
to_node,
prod,
transmission_type,
carrier,
)
line = dict(
visible=False,
mode="lines",
hoverinfo="text",
text="",
line=dict(width=prod * scale_factor + 1, color=techs_colors[transmission_type]),
legendgroup=transmission_type,
opacity=0.6,
showlegend=False,
)
line_legend = dict(
visible=False,
mode="lines",
hoverinfo="text",
text="",
line=dict(width=10, color=techs_colors[transmission_type]),
legendgroup=transmission_type,
name=break_name(name, 18),
opacity=0.6,
)
line_info_marker = dict(
visible=False,
mode="markers",
hoverinfo="text",
text=hover_info,
marker=dict(symbol="square", opacity=0, color=techs_colors[transmission_type]),
legendgroup=transmission_type,
name=tech,
showlegend=False,
)
if set(node_coordinates.index) == set(["x", "y"]):
h_coord, v_coord = "x", "y"
elif set(node_coordinates.index) == set(["lon", "lat"]):
h_coord, v_coord = "lon", "lat"
line[h_coord] = [
node_coordinates[from_node][h_coord],
node_coordinates[to_node][h_coord],
]
line[v_coord] = [
node_coordinates[from_node][v_coord],
node_coordinates[to_node][v_coord],
]
line_legend[h_coord] = [None]
line_legend[v_coord] = [None]
line_info_marker[h_coord] = [
(1 / 2)
* (node_coordinates[from_node][h_coord] + node_coordinates[to_node][h_coord])
]
line_info_marker[v_coord] = [
(1 / 2)
* (node_coordinates[from_node][v_coord] + node_coordinates[to_node][v_coord])
]
if is_initial_timestep:
# plot only the first timestep data when the chart is initialized
line["visible"] = True
line_legend["visible"] = True
line_info_marker["visible"] = True
if add_legend:
return [line, line_legend, line_info_marker]
else:
return [line, line_info_marker]
def _marker(
node_coordinates,
node,
carrier,
tech,
prod,
scale_factor,
techs_colors,
is_initial_timestep,
add_legend,
name,
):
# Example: "Region1: 3552.65 of pipe_import (gas)"
hover_info = "%s: %.2f of %s (%s)" % (node, prod, tech, carrier)
marker_dict = dict(
visible=False,
hoverinfo="text",
text=hover_info,
mode="markers",
marker=dict(
symbol="circle-dot",
opacity=0.6,
size=prod * scale_factor + 1,
color=techs_colors[tech],
),
legendgroup=tech,
showlegend=False,
)
marker_legend = dict(
visible=False,
hoverinfo="text",
text=hover_info,
mode="markers",
marker=dict(
symbol="circle-dot",
opacity=0.6,
size=10,
color=techs_colors[tech],
),
legendgroup=tech,
name=break_name(name, 18),
)
if set(node_coordinates.index) == set(["x", "y"]):
h_coord, v_coord = "x", "y"
elif set(node_coordinates.index) == set(["lon", "lat"]):
h_coord, v_coord = "lon", "lat"
marker_dict[h_coord] = [node_coordinates[node][h_coord]]
marker_dict[v_coord] = [node_coordinates[node][v_coord]]
marker_legend[h_coord] = [None]
marker_legend[v_coord] = [None]
if is_initial_timestep:
# plot only the first timestep data when the chart is initialized
marker_dict["visible"] = True
marker_legend["visible"] = True
if add_legend:
return [marker_dict, marker_legend]
else:
return [marker_dict]
def _production_data(model, timesteps, timestep):
"""
    Returns a list of dicts; each dict is a plotly marker (node production)
    or line (transmission) on the map.
"""
node_coordinates = model._model_data.loc_coordinates.to_pandas()
node_techs_carriers_production = model.get_formatted_array("carrier_prod")
techs_colors = model._model_data.colors.to_pandas()
scale_factor = 100 / abs(
model.results.carrier_prod.values.max()
- model.results.carrier_prod.values.min()
)
tech_names = set(model._model_data.techs.values)
production_data = []
# we iterate through each dimension for one timestep in order to
# add the different production sources of the node and line
# transmissions toward it
    # Complexity: O(len(carriers) * len(techs) * len(nodes)). Since
    # len(carriers) is expected to be small (<10), there is ample headroom in
    # the number of techs and nodes (len(techs) * len(nodes) <= 10^9,
    # roughly one second of processing time in Python).
links = []
# list of sets { tech, from_node, to_node }
links_data = []
# links associated data, like prod, carrier, transmission_type
# [ [prod, carrier, transmission_type], [] ..]
for node in node_techs_carriers_production.nodes.values:
for carrier in node_techs_carriers_production.carriers.values:
techs_production = node_techs_carriers_production.sel(
carriers=carrier, nodes=node
).to_pandas()
for tech, prod in techs_production.loc[:, timestep].iteritems():
if prod and prod > 0:
# if some energy is at stake
tech_name = tech.split(":")[0]
if tech_name in tech_names:
add_legend = True
tech_names.discard(tech_name)
name = model._model_data.names.loc[tech_name].item()
else:
# only add legend information once for a tech
add_legend = False
name = ""
if len(tech.split(":")) > 1:
# "transmission_type:node"
# if it gets energy from another node
[transmission_type, from_node] = tech.split(":")
links.append({tech_name, from_node, node})
links_data.append(
{
"transmission_type": transmission_type,
"from_node": from_node,
"to_node": node,
"prod": prod,
"carrier": carrier,
"tech": tech,
"add_legend": add_legend,
"name": name,
}
)
else:
# if the energy comes from this node
production_data.extend(
_marker(
node_coordinates,
node,
carrier,
tech,
prod,
scale_factor,
techs_colors,
timestep == timesteps[0],
add_legend,
name,
)
)
def merge(first_link, second_link):
if first_link["prod"] > second_link["prod"]:
first_link["prod"] -= second_link["prod"]
return first_link
elif first_link["prod"] < second_link["prod"]:
second_link["prod"] -= first_link["prod"]
return second_link
else:
            # the two transmission links cancel out exactly,
            # so no representation of the link is returned
return {}
# merge the links data
links_merged = []
while len(links) > 0:
data = links_data[0]
# we check if there is a transmission
# link in the opposite direction and merge if so
j = 1
while j < len(links):
if links[j] == links[0]:
data = merge(links_data[0], links_data[j])
                # delete by index: list.remove() would match the first equal
                # element, i.e. links[0], because the two sets compare equal
                del links[j]
                del links_data[j]
j -= 1
j += 1
links_merged.append(data)
        del links[0]
        del links_data[0]
# add merged links to production_data
for link in links_merged:
if link:
params_list = [
node_coordinates,
link["transmission_type"],
link["to_node"],
link["from_node"],
link["carrier"],
link["tech"],
link["prod"],
scale_factor,
techs_colors,
timestep == timesteps[0],
link["add_legend"],
link["name"],
]
production_data.extend(_line(*params_list))
return production_data
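# Illustrative sketch (hypothetical helper, not part of the original module): the
# nested ``merge`` in ``_production_data`` above nets out two transmission links
# running in opposite directions between the same pair of nodes. This only mirrors
# the dict layout built above with made-up values.
def _example_net_opposite_links():
    a_to_b = {"from_node": "A", "to_node": "B", "prod": 5.0}
    b_to_a = {"from_node": "B", "to_node": "A", "prod": 2.0}
    # Same rule as ``merge``: keep the link with the larger flow, reduced by the smaller one
    if a_to_b["prod"] > b_to_a["prod"]:
        a_to_b["prod"] -= b_to_a["prod"]
        net = a_to_b
    elif a_to_b["prod"] < b_to_a["prod"]:
        b_to_a["prod"] -= a_to_b["prod"]
        net = b_to_a
    else:
        net = {}  # equal flows cancel out and nothing is drawn
    return net  # here: {"from_node": "A", "to_node": "B", "prod": 3.0}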
def plot_flows(model, timestep_cycle=1, timestep_index_subset=[], **kwargs):
"""
Parameters
----------
timestep_cycle : int, optional
Shows one of every timestep_cycle timesteps. Default is 1 (all timesteps
are shown).
timestep_index_subset : list of int, optional
Only the timesteps between those two indexes are shown. Default is []
(all timesteps are shown).
"""
if len(timestep_index_subset) == 2:
timestep_start = timestep_index_subset[0]
timestep_end = timestep_index_subset[1]
else:
timestep_start, timestep_end = 0, len(model._model_data.timesteps.values)
try:
node_coordinates = model._model_data.loc_coordinates
except AttributeError:
raise ValueError(
"Model does not define node coordinates "
"- no energy flow plotting possible."
)
timesteps = model._model_data.timesteps.values[
timestep_start:timestep_end:timestep_cycle
] # slicing the desired timesteps
timeseries_dateformat = model.model_config["timeseries_dateformat"]
steps_length = []
data = []
for timestep in timesteps:
data_by_timestep = _production_data(model, timesteps, timestep)
steps_length.append(len(data_by_timestep))
data.extend(data_by_timestep)
steps = []
for i, timestep in enumerate(timesteps):
step = dict(
# active="label of first show timestep data",
method="restyle",
args=["visible", [False] * len(data)],
label=pd.to_datetime(timestep).strftime(timeseries_dateformat),
)
i_start = sum(steps_length[:i]) # visible start index
i_end = i_start + steps_length[i] # visible end index
step["args"][1][i_start:i_end] = [True] * steps_length[i]
# we set visible to True for all the points of one timestep
steps.append(step)
sliders = [
dict(
# active="start sliding",True
currentvalue=dict(
visible=True,
prefix="Timestep: ",
),
pad={"t": 50},
activebgcolor="black",
bgcolor="grey",
steps=steps,
)
]
# define the map general layout here
layout = dict(
title="Energy Flow",
showlegend=True,
width=900,
height=700,
hovermode="closest",
sliders=sliders,
margin={"autoexpand": False, "b": 150, "r": 180},
)
    # change the range of the plot depending on whether it uses x,y or lat,lon coords
if sorted(node_coordinates.coordinates.values) == ["x", "y"]:
layout["xaxis"] = dict(range=get_range(node_coordinates, "x", 0.2))
layout["yaxis"] = dict(range=get_range(node_coordinates, "y", 0.2))
for trace in data:
trace["type"] = "scatter"
elif sorted(node_coordinates.coordinates.values) == ["lat", "lon"]:
layout["geo"] = dict(
scope="world",
showland=True,
showcountries=True,
showsubunits=True,
showocean=True,
oceancolor="#aec6cf",
subunitcolor="blue",
countrycolor="green",
lonaxis=dict(range=get_range(node_coordinates, "lon", 0.2)),
lataxis=dict(range=get_range(node_coordinates, "lat", 0.2)),
countrywidth=0.5,
subunitwidth=0.5,
landcolor="rgb(255,255,255)",
)
for trace in data:
trace["type"] = "scattergeo"
return data, layout
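# Illustrative sketch (hypothetical helper, not part of the original module): each
# slider step built in ``plot_flows`` restyles trace visibility with one boolean
# mask over *all* traces, enabling only the traces of that timestep. With assumed
# per-timestep trace counts of [2, 3] this returns
# [[True, True, False, False, False], [False, False, True, True, True]].
def _example_visibility_masks(steps_length=(2, 3)):
    total = sum(steps_length)
    masks = []
    for i, length in enumerate(steps_length):
        mask = [False] * total            # hide everything by default
        start = sum(steps_length[:i])     # first trace index of this timestep
        mask[start:start + length] = [True] * length
        masks.append(mask)
    return masks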
|
apache-2.0
|
gingi99/research_dr
|
python/MLEM2/mixture_lers.py
|
1
|
5332
|
# coding: utf-8
import os
import sys
import joblib
import datetime
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from itertools import product
from mlem2 import getNominalList
from mlem2 import getJudgeNominal
from LERS import predictByLERS
from LERS import predictProbaByLERS
# from mlem2 import showRules
# common
sys.path.append("../common/")
from util import loadPickle
# ----------------------------------------
# Load the rules
# ----------------------------------------
def loadRules(DIR, FILENAME, ruleset, method, ITER):
if method == 'A' or method == 'B' :
fullpath_rules = DIR+'/'+FILENAME+'/'+ruleset+'/'+ITER+'/rules_'+method+'.pkl'
rules = loadPickle(fullpath_rules)
elif method == 'both' :
fullpath_rules_A = DIR+'/'+FILENAME+'/'+ruleset+'/'+ITER+'/rules_A.pkl'
fullpath_rules_B = DIR+'/'+FILENAME+'/'+ruleset+'/'+ITER+'/rules_B.pkl'
rules_A = loadPickle(fullpath_rules_A)
rules_B = loadPickle(fullpath_rules_B)
rules = rules_A + rules_B
    else :
        raise ValueError("no method: " + str(method))
    return rules
# ========================================
# main
# ========================================
def main(DIR, FILENAME, CLASSES, ruleset, method, ITER) :
ITER = str(ITER)
print(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')+' '+FILENAME+' '+method+' '+ITER+" START")
# load rules
rules = loadRules(DIR, FILENAME, ruleset, method, ITER)
# load test and val data
filepath_test_A = DIR+'/'+FILENAME+'/MIXTURE/dataset/nobias2/'+ITER+'/test_A.tsv'
df_test_A = pd.read_csv(filepath_test_A, delimiter='\t')
filepath_val_A = DIR+'/'+FILENAME+'/MIXTURE/dataset/nobias2/'+ITER+'/val_A.tsv'
df_val_A = pd.read_csv(filepath_val_A, delimiter='\t')
decision_class_test = df_test_A[df_test_A.columns[-1]].values.tolist()
decision_class_val = df_val_A[df_val_A.columns[-1]].values.tolist()
filepath_nominal = DIR+'/'+FILENAME+'/dataset/'+FILENAME+'.nominal'
list_nominal = getNominalList(filepath_nominal)
list_judgeNominal = getJudgeNominal(df_test_A, list_nominal)
# predict by LERS
predict_test = predictByLERS(rules, df_test_A, list_judgeNominal)
predict_proba_test = predictProbaByLERS(rules, df_test_A, list_judgeNominal, classes = CLASSES)
predict_proba_test = np.array(predict_proba_test)[:,0]
predict_val = predictByLERS(rules, df_val_A, list_judgeNominal)
predict_proba_val = predictProbaByLERS(rules, df_val_A, list_judgeNominal, classes = CLASSES)
predict_proba_val = np.array(predict_proba_val)[:,0]
    # Compute the accuracy
result_acc_test = accuracy_score(decision_class_test, predict_test)
result_acc_val = accuracy_score(decision_class_val, predict_val)
    # Compute the AUC
fpr, tpr, thresholds = roc_curve(decision_class_test, predict_proba_test, pos_label = CLASSES[0])
result_auc_test = auc(fpr, tpr)
fpr, tpr, thresholds = roc_curve(decision_class_val, predict_proba_val, pos_label = CLASSES[0])
result_auc_val = auc(fpr, tpr)
# save
DIR_SAVE = DIR+'/'+FILENAME+'/MIXTURE/'+method+'/nobias2/'+ITER
if not os.path.isdir(DIR_SAVE) : os.makedirs(DIR_SAVE, exist_ok=True)
fullpath_test = DIR_SAVE+'/result_test.tsv'
fullpath_val = DIR_SAVE+'/result_val.tsv'
df_result_test = pd.DataFrame({'y_true' : decision_class_test, 'y_predict' : predict_test, 'y_predict_proba' : predict_proba_test, 'acc' : result_acc_test, 'auc' : result_auc_test})
df_result_val = pd.DataFrame({'y_true' : decision_class_val, 'y_predict' : predict_val, 'y_predict_proba' : predict_proba_val, 'acc' : result_acc_val, 'auc' : result_auc_val})
pd.DataFrame.to_csv(df_result_test, fullpath_test, index=False, sep='\t', header=True)
pd.DataFrame.to_csv(df_result_val, fullpath_val, index=False, sep='\t', header=True)
print(datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')+' '+FILENAME+' '+method+' '+ITER+" END")
# ========================================
# Run in parallel
# ========================================
def multi_main(n_jobs, DIR, FILENAMES, CLASSES, rulesets, methods):
joblib.Parallel(n_jobs=n_jobs)(joblib.delayed(main)(DIR, FILENAME, CLASSES[FILENAME], ruleset, method, ITER) for (FILENAME, ruleset, method, ITER) in product(FILENAMES, rulesets, methods, range(1,51)))
# -------------------------------------------
# main
# -------------------------------------------
if __name__ == "__main__":
DIR = '/mnt/data/uci'
#FILENAMES = ['german_credit_categorical', 'default_cleansing', 'adult_cleansing2']
FILENAMES = ['adult_cleansing2']
#FILENAMES = ['default_cleansing']
#FILENAMES = ['german_credit_categorical']
CLASSES = {'german_credit_categorical' : [1, 2],
'default_cleansing' : [1, 2],
'adult_cleansing2' : ["<=50K", ">50K"]}
rulesets = ['MIXTURE/rules/nobias2']
methods = ['A','B','both']
#FILENAME = 'adult_cleansing2'
#FILENAME = 'german_credit_categorical'
#ruleset = 'MIXTURE/rules/nobias'
#method = "A"
#ITER = 1
#main(DIR, FILENAME, CLASSES[FILENAME], ruleset, method, ITER)
n_jobs = 50
multi_main(n_jobs, DIR, FILENAMES, CLASSES, rulesets, methods)
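# Illustrative sketch (hypothetical helper, not part of the original script): main()
# keeps only the first column of the predictProbaByLERS output and pairs it with
# pos_label=CLASSES[0] when calling roc_curve. The numbers below are made up and only
# show that reduction pattern with scikit-learn's roc_curve/auc.
def _example_auc_from_proba():
    y_true = [1, 1, 2, 2]                  # e.g. the german_credit classes [1, 2]
    proba = np.array([[0.9, 0.1],          # one column per class, column 0 -> class 1
                      [0.6, 0.4],
                      [0.3, 0.7],
                      [0.2, 0.8]])
    scores = proba[:, 0]                   # score of the first class, as in main()
    fpr, tpr, _ = roc_curve(y_true, scores, pos_label=1)
    return auc(fpr, tpr)                   # 1.0 for this perfectly separated toy case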
|
mit
|
Ziqi-Li/bknqgis
|
Shapely/docs/code/multilinestring.py
|
4
|
1045
|
from matplotlib import pyplot
from shapely.geometry import MultiLineString
from figures import SIZE, set_limits, plot_line, plot_bounds, color_issimple
from figures import plot_coords as _plot_coords
def plot_coords(ax, ob):
for line in ob:
_plot_coords(ax, line, zorder=1)
def plot_lines(ax, ob):
color = color_issimple(ob)
for line in ob:
plot_line(ax, line, color=color, alpha=0.7, zorder=2)
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
# 1: disconnected multilinestring
ax = fig.add_subplot(121)
mline1 = MultiLineString([((0, 0), (1, 1)), ((0, 2), (1, 1.5), (1.5, 1), (2, 0))])
plot_coords(ax, mline1)
plot_bounds(ax, mline1)
plot_lines(ax, mline1)
ax.set_title('a) simple')
set_limits(ax, -1, 3, -1, 3)
# 2: complex (non-simple) multilinestring
ax = fig.add_subplot(122)
mline2 = MultiLineString([((0, 0), (1, 1), (1.5, 1)), ((0, 2), (1, 1.5), (1.5, 1), (2, 0))])
plot_coords(ax, mline2)
plot_bounds(ax, mline2)
plot_lines(ax, mline2)
ax.set_title('b) complex')
set_limits(ax, -1, 3, -1, 3)
pyplot.show()
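# Illustrative check (not part of the original figure script): the panel titles
# reflect Shapely's is_simple predicate, which color_issimple uses to choose the
# line colour. mline1's two parts never touch, while mline2's parts meet at
# (1.5, 1), an interior point of its second line, so it is not simple.
assert mline1.is_simple
assert not mline2.is_simple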
|
gpl-2.0
|
rubikloud/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
airbnb/airflow
|
tests/providers/elasticsearch/hooks/test_elasticsearch.py
|
7
|
3562
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.models import Connection
from airflow.providers.elasticsearch.hooks.elasticsearch import ElasticsearchHook
class TestElasticsearchHookConn(unittest.TestCase):
def setUp(self):
super().setUp()
self.connection = Connection(host='localhost', port=9200, schema='http')
class UnitTestElasticsearchHook(ElasticsearchHook):
conn_name_attr = 'elasticsearch_conn_id'
self.db_hook = UnitTestElasticsearchHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('airflow.providers.elasticsearch.hooks.elasticsearch.connect')
def test_get_conn(self, mock_connect):
self.db_hook.test_conn_id = 'non_default' # pylint: disable=attribute-defined-outside-init
self.db_hook.get_conn()
mock_connect.assert_called_with(host='localhost', port=9200, scheme='http', user=None, password=None)
class TestElasticsearchHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestElasticsearchHook(ElasticsearchHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestElasticsearchHook()
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
self.cur.execute.assert_called_once_with(statement)
|
apache-2.0
|
vinodkc/spark
|
python/pyspark/pandas/exceptions.py
|
15
|
5003
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Exceptions/Errors used in pandas-on-Spark.
"""
from typing import Optional
class DataError(Exception):
pass
class SparkPandasIndexingError(Exception):
pass
def code_change_hint(pandas_function: Optional[str], spark_target_function: Optional[str]) -> str:
if pandas_function is not None and spark_target_function is not None:
return "You are trying to use pandas function {}, use spark function {}".format(
pandas_function, spark_target_function
)
elif pandas_function is not None and spark_target_function is None:
return (
"You are trying to use pandas function {}, checkout the spark "
"user guide to find a relevant function"
).format(pandas_function)
elif pandas_function is None and spark_target_function is not None:
return "Use spark function {}".format(spark_target_function)
else: # both none
return "Checkout the spark user guide to find a relevant function"
class SparkPandasNotImplementedError(NotImplementedError):
def __init__(
self,
pandas_function: Optional[str] = None,
spark_target_function: Optional[str] = None,
description: str = "",
):
self.pandas_source = pandas_function
self.spark_target = spark_target_function
hint = code_change_hint(pandas_function, spark_target_function)
if len(description) > 0:
description += " " + hint
else:
description = hint
super().__init__(description)
class PandasNotImplementedError(NotImplementedError):
def __init__(
self,
class_name: str,
method_name: Optional[str] = None,
arg_name: Optional[str] = None,
property_name: Optional[str] = None,
deprecated: bool = False,
reason: str = "",
):
assert (method_name is None) != (property_name is None)
self.class_name = class_name
self.method_name = method_name
self.arg_name = arg_name
if method_name is not None:
if arg_name is not None:
msg = "The method `{0}.{1}()` does not support `{2}` parameter. {3}".format(
class_name, method_name, arg_name, reason
)
else:
if deprecated:
msg = (
"The method `{0}.{1}()` is deprecated in pandas and will therefore "
+ "not be supported in pandas-on-Spark. {2}"
).format(class_name, method_name, reason)
else:
if reason == "":
reason = " yet."
else:
reason = ". " + reason
msg = "The method `{0}.{1}()` is not implemented{2}".format(
class_name, method_name, reason
)
else:
if deprecated:
msg = (
"The property `{0}.{1}()` is deprecated in pandas and will therefore "
+ "not be supported in pandas-on-Spark. {2}"
).format(class_name, property_name, reason)
else:
if reason == "":
reason = " yet."
else:
reason = ". " + reason
msg = "The property `{0}.{1}()` is not implemented{2}".format(
class_name, property_name, reason
)
super().__init__(msg)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.exceptions
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.exceptions.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.exceptions tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.exceptions,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
drammock/mne-python
|
mne/decoding/ems.py
|
12
|
7624
|
# Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..io.pick import _picks_to_idx
from ..parallel import parallel_func
from ..utils import logger, verbose
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
"""Transformer to compute event-matched spatial filters.
This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
.. note:: EMS only works for binary classification.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_times)
The set of spatial filters.
classes_ : ndarray, shape (n_classes,)
The target classes.
References
----------
.. footbibliography::
"""
def __repr__(self): # noqa: D105
if hasattr(self, 'filters_'):
return '<EMS: fitted with %i filters on %i classes.>' % (
len(self.filters_), len(self.classes_))
else:
return '<EMS: not fitted.>'
def fit(self, X, y):
"""Fit the spatial filters.
        .. note:: EMS is fitted on data normalized by channel type before the
fitting of the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The training data.
y : array of int, shape (n_epochs)
The target classes.
Returns
-------
self : instance of EMS
Returns self.
"""
classes = np.unique(y)
if len(classes) != 2:
raise ValueError('EMS only works for binary classification.')
self.classes_ = classes
filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0)
filters /= np.linalg.norm(filters, axis=0)[None, :]
self.filters_ = filters
return self
def transform(self, X):
"""Transform the data by the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The input data.
Returns
-------
X : array, shape (n_epochs, n_times)
The input data transformed by the spatial filters.
"""
Xt = np.sum(X * self.filters_, axis=1)
return Xt
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=1, cv=None,
verbose=None):
"""Compute event-matched spatial filter on epochs.
This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
time course. No time
window needs to be specified. The result is a spatial filter at each
time point and a corresponding time course. Intuitively, the result
gives the similarity between the filter at each time point and the
data vector (sensors) at that time point.
    .. note:: EMS only works for binary classification.
    .. note:: The present function applies a leave-one-out cross-validation,
following Schurger et al's paper. However, we recommend using
a stratified k-fold cross-validation. Indeed, leave-one-out tends
to overfit and cannot be used to estimate the variance of the
prediction within a given fold.
    .. note:: Because of the leave-one-out, this function needs an equal
number of epochs in each of the two conditions.
Parameters
----------
epochs : instance of mne.Epochs
The epochs.
conditions : list of str | None, default None
If a list of strings, strings must match the epochs.event_id's key as
well as the number of conditions supported by the objective_function.
If None keys in epochs.event_id are used.
%(picks_good_data)s
%(n_jobs)s
cv : cross-validation object | str | None, default LeaveOneOut
The cross-validation scheme.
%(verbose)s
Returns
-------
surrogate_trials : ndarray, shape (n_trials // 2, n_times)
The trial surrogates.
mean_spatial_filter : ndarray, shape (n_channels, n_times)
The set of spatial filters.
conditions : ndarray, shape (n_classes,)
The conditions used. Values correspond to original event ids.
References
----------
.. footbibliography::
"""
logger.info('...computing surrogate time series. This can take some time')
# Default to leave-one-out cv
cv = 'LeaveOneOut' if cv is None else cv
picks = _picks_to_idx(epochs.info, picks)
if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
raise ValueError('The same number of epochs is required by '
'this function. Please consider '
'`epochs.equalize_event_counts`')
if conditions is None:
conditions = epochs.event_id.keys()
epochs = epochs.copy()
else:
epochs = epochs[conditions]
epochs.drop_bad()
if len(conditions) != 2:
raise ValueError('Currently this function expects exactly 2 '
'conditions but you gave me %i' %
len(conditions))
ev = epochs.events[:, 2]
# Special care to avoid path dependent mappings and orders
conditions = list(sorted(conditions))
cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
info = pick_info(epochs.info, picks)
data = epochs.get_data(picks=picks)
# Scale (z-score) the data by channel type
# XXX the z-scoring is applied outside the CV, which is not standard.
for ch_type in ['mag', 'grad', 'eeg']:
if ch_type in epochs:
# FIXME should be applied to all sort of data channels
if ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
else:
this_picks = pick_types(info, meg=ch_type, eeg=False)
data[:, this_picks] /= np.std(data[:, this_picks])
# Setup cross-validation. Need to use _set_cv to deal with sklearn
# deprecation of cv objects.
y = epochs.events[:, 2]
_, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
parallel, p_func, _ = parallel_func(_run_ems, n_jobs=n_jobs)
# FIXME this parallelization should be removed.
# 1) it's numpy computation so it's already efficient,
# 2) it duplicates the data in RAM,
# 3) the computation is already super fast.
out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
for train, test in cv_splits)
surrogate_trials, spatial_filter = zip(*out)
surrogate_trials = np.array(surrogate_trials)
spatial_filter = np.mean(spatial_filter, axis=0)
return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
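# Illustrative usage sketch (hypothetical helper, not part of the upstream module):
# EMS reduces (n_epochs, n_channels, n_times) data to one surrogate time course per
# epoch by projecting every time point onto the normalized class-difference filter.
# Random data are used only to show the resulting shapes.
def _example_ems_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5, 50)              # 20 epochs, 5 channels, 50 time points
    y = np.array([0] * 10 + [1] * 10)     # binary labels, as required by EMS
    ems = EMS().fit(X, y)
    Xt = ems.transform(X)
    return ems.filters_.shape, Xt.shape   # (5, 50) and (20, 50)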
|
bsd-3-clause
|
junwucs/h2o-3
|
h2o-py/tests/testdir_algos/rf/pyunit_smallcatRF.py
|
5
|
1845
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def smallcatRF():
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
#Log.info("Importing alphabet_cattest.csv data...\n")
alphabet = h2o.import_file(path=h2o.locate("smalldata/gbm_test/alphabet_cattest.csv"))
alphabet["y"] = alphabet["y"].asfactor()
#Log.info("Summary of alphabet_cattest.csv from H2O:\n")
#alphabet.summary()
# Prepare data for scikit use
trainData = np.loadtxt(h2o.locate("smalldata/gbm_test/alphabet_cattest.csv"), delimiter=',', skiprows=1,
converters={0:lambda s: ord(s.split("\"")[1])})
trainDataResponse = trainData[:,1]
trainDataFeatures = trainData[:,0]
    # Train H2O RF Model:
    #Log.info("H2O RF (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
rf_h2o = h2o.random_forest(x=alphabet[['X']], y=alphabet["y"], ntrees=1, max_depth=1, nbins=100)
    # Train scikit RF Model:
    # Log.info("scikit RF with same parameters:")
rf_sci = ensemble.RandomForestClassifier(n_estimators=1, criterion='entropy', max_depth=1)
rf_sci.fit(trainDataFeatures[:,np.newaxis],trainDataResponse)
# h2o
rf_perf = rf_h2o.model_performance(alphabet)
auc_h2o = rf_perf.auc()
# scikit
auc_sci = roc_auc_score(trainDataResponse, rf_sci.predict_proba(trainDataFeatures[:,np.newaxis])[:,1])
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
tests.run_test(sys.argv, smallcatRF)
|
apache-2.0
|
anirudhjayaraman/scikit-learn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
338
|
4324
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
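# Illustrative sketch (hypothetical helper, not part of the original example): the
# module docstring measures robustness as the relative standard deviation of the
# inertia. For an inertia matrix of shape (len(n_init_range), n_runs), such as the
# one filled in the evaluation loop above, that quantity is std/mean across runs.
def relative_inertia_std(inertia_matrix):
    return inertia_matrix.std(axis=1) / inertia_matrix.mean(axis=1)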
|
bsd-3-clause
|
CroatianMeteorNetwork/RMS
|
RMS/Formats/Platepar.py
|
1
|
57061
|
""" CMN-style astrometric calibration.
"""
# The MIT License
# Copyright (c) 2016 Denis Vida
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import os
import json
import copy
import datetime
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
from RMS.Astrometry.Conversions import date2JD, jd2Date, trueRaDec2ApparentAltAz
import RMS.Astrometry.ApplyAstrometry
from RMS.Math import angularSeparation, sphericalPointFromHeadingAndDistance
# Import Cython functions
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from RMS.Astrometry.CyFunctions import cyTrueRaDec2ApparentAltAz, cyApparentAltAz2TrueRADec, \
pyRefractionTrueToApparent
class stationData(object):
""" Holds information about one meteor station (location) and observed points.
"""
def __init__(self, file_name):
self.file_name = file_name
self.station_code = ''
self.lon = 0
self.lat = 0
self.h = 0
self.points = []
def __str__(self):
return 'Station: ' + self.station_code + ' data points: ' + str(len(self.points))
def parseInf(file_name):
""" Parse information from an INF file to a stationData object.
"""
station_data_obj = stationData(file_name)
with open(file_name) as f:
for line in f.readlines()[2:]:
line = line.split()
if 'Station_Code' in line[0]:
station_data_obj.station_code = line[1]
elif 'Long' in line[0]:
station_data_obj.lon = float(line[1])
elif 'Lati' in line[0]:
station_data_obj.lat = float(line[1])
elif 'Height' in line[0]:
station_data_obj.h = int(line[1])
else:
station_data_obj.points.append(map(float, line))
return station_data_obj
def getCatalogStarsImagePositions(catalog_stars, jd, platepar):
""" Get image positions of catalog stars using the current platepar values.
Arguments:
catalog_stars: [2D list] A list of (ra, dec, mag) pairs of catalog stars.
jd: [float] Julian date for transformation.
platepar: [Platepar]
Return:
(x_array, y_array mag_catalog): [tuple of ndarrays] X, Y positons and magnitudes of stars on the
image.
"""
ra_catalog, dec_catalog, mag_catalog = catalog_stars.T
# Convert star RA, Dec to image coordinates
x_array, y_array = RMS.Astrometry.ApplyAstrometry.raDecToXYPP(ra_catalog, dec_catalog, jd, platepar)
return x_array, y_array, mag_catalog
def getPairedStarsSkyPositions(img_x, img_y, jd, platepar):
""" Compute RA, Dec of all paired stars on the image given the platepar.
Arguments:
img_x: [ndarray] Array of column values of the stars.
img_y: [ndarray] Array of row values of the stars.
jd: [float] Julian date for transformation.
platepar: [Platepar instance] Platepar object.
Return:
(ra_array, dec_array): [tuple of ndarrays] Arrays of RA and Dec of stars on the image.
"""
# Compute RA, Dec of image stars
img_time = jd2Date(jd)
_, ra_array, dec_array, _ = RMS.Astrometry.ApplyAstrometry.xyToRaDecPP(len(img_x)*[img_time], img_x, \
img_y, len(img_x)*[1], platepar, extinction_correction=False)
return ra_array, dec_array
class Platepar(object):
def __init__(self, distortion_type="poly3+radial"):
""" Astrometric and photometric calibration plate parameters. Several distortion types are supported.
Arguments:
file_name: [string] Path to the platepar file.
Keyword arguments:
distortion_type: [str] Distortion type. It can be one of the following:
- "poly3+radial" - 3rd order polynomial fit including a single radial term
- "poly3+radial3" - 3rd order polynomial fit including two radial terms (r + r^3)
- "radial3-all" - 3rd order radial distortion, all powers
- "radial4-all" - 4rd order radial distortion, all powers
- "radial5-all" - 5rd order radial distortion, all powers
- "radial3-odd" - 3rd order radial distortion, only odd powers
- "radial5-odd" - 5th order radial distortion, only odd powers
- "radial7-odd" - 7th order radial distortion, only odd powers
- "radial9-odd" - 7th order radial distortion, only odd powers
Return:
self: [object] Instance of this class with loaded platepar parameters.
"""
self.version = 2
# Set the distortion type
self.distortion_type = distortion_type
self.setDistortionType(self.distortion_type)
# Station coordinates
self.lat = self.lon = self.elev = 0
# Reference time and date
self.time = 0
self.JD = 2451545.0
# UT correction
self.UT_corr = 0
self.Ho = 0
self.X_res = 1280
self.Y_res = 720
self.fov_h = 88
self.fov_v = 45
# FOV centre
self.RA_d = 0
self.dec_d = 0
self.pos_angle_ref = 0
self.rotation_from_horiz = 0
self.az_centre = 0
self.alt_centre = 0
# FOV scale (px/deg)
self.F_scale = 1.0
# Refraction on/off
self.refraction = True
# If the calibration was done without then refraction and points on the sky are measured, then they
# need to be corrected for refraction. WARNING: This should not be used if the distortion model
# itself compensates for the refraction (e.g. the polynomial model)
self.measurement_apparent_to_true_refraction = False
# Equal aspect (X and Y scales are equal) - used ONLY for radial distortion
self.equal_aspect = True
# Force distortion centre to image centre
self.force_distortion_centre = False
# Asymmetry correction - used ONLY for radial distortion
self.asymmetry_corr = False
# Photometry calibration
self.mag_0 = -2.5
self.mag_lev = 1.0
self.mag_lev_stddev = 0.0
self.gamma = 1.0
self.vignetting_coeff = 0.0
# Extinction correction scaling
self.extinction_scale = 1.0
self.station_code = "None"
self.star_list = None
# Flag to indicate that the platepar was refined with CheckFit
self.auto_check_fit_refined = False
        # Flag to indicate that the platepar was successfully auto recalibrated on individual FF files
self.auto_recalibrated = False
# Init the distortion parameters
self.resetDistortionParameters()
def resetDistortionParameters(self, preserve_centre=False):
""" Set the distortion parameters to zero.
Keyword arguments:
preserve_centre: [bool] Don't reset the distortion centre. False by default, in which case it will
be reset.
"""
# Store the distortion centre if it needs to be preserved
if preserve_centre:
# Preserve centre for the radial distortion
if self.distortion_type.startswith("radial"):
# Note that the radial distortion parameters are kept in the X poly array
x_centre_fwd, y_centre_fwd = self.x_poly_fwd[0], self.x_poly_fwd[1]
x_centre_rev, y_centre_rev = self.x_poly_rev[0], self.x_poly_rev[1]
else:
# Preserve centre for the polynomial distortion
x_centre_fwd, x_centre_rev = self.x_poly_fwd[0], self.x_poly_rev[0]
y_centre_fwd, y_centre_rev = self.y_poly_fwd[0], self.y_poly_rev[0]
# Reset distortion fit (forward and reverse)
self.x_poly_fwd = np.zeros(shape=(self.poly_length,), dtype=np.float64)
self.y_poly_fwd = np.zeros(shape=(self.poly_length,), dtype=np.float64)
self.x_poly_rev = np.zeros(shape=(self.poly_length,), dtype=np.float64)
self.y_poly_rev = np.zeros(shape=(self.poly_length,), dtype=np.float64)
# Preserve the image centre
if preserve_centre:
# Preserve centre for the radial distortion
if self.distortion_type.startswith("radial") and (not self.force_distortion_centre):
# Note that the radial distortion parameters are kept in the X poly array
self.x_poly_fwd[0], self.x_poly_fwd[1] = x_centre_fwd, y_centre_fwd
self.x_poly_rev[0], self.x_poly_rev[1] = x_centre_rev, y_centre_rev
else:
# Preserve centre for the polynomial distortion
self.x_poly_fwd[0], self.x_poly_rev[0] = x_centre_fwd, x_centre_rev
self.y_poly_fwd[0], self.y_poly_rev[0] = y_centre_fwd, y_centre_rev
# Reset the image centre
else:
# Set the first coeffs to 0.5, as that is the real centre of the FOV
self.x_poly_fwd[0] = 0.5
self.y_poly_fwd[0] = 0.5
self.x_poly_rev[0] = 0.5
self.y_poly_rev[0] = 0.5
# If the distortion is radial, set the second X parameter to 0.5, as x_poly[1] is used for the Y
# offset in the radial models
if self.distortion_type.startswith("radial"):
self.x_poly_fwd[0] /= self.X_res/2
self.x_poly_rev[0] /= self.X_res/2
self.x_poly_fwd[1] = 0.5/(self.Y_res/2)
self.x_poly_rev[1] = 0.5/(self.Y_res/2)
# If the distortion center is forced to the center of the image, reset all parameters to zero
if self.force_distortion_centre:
self.x_poly_fwd *= 0
self.x_poly_rev *= 0
self.x_poly = self.x_poly_fwd
self.y_poly = self.y_poly_fwd
def setDistortionType(self, distortion_type, reset_params=True):
""" Sets the distortion type. """
# List of distortion types
self.distortion_type_list = [
"poly3+radial", \
"poly3+radial3", \
"poly3+radial5", \
"radial3-all", \
"radial4-all", \
"radial5-all", \
"radial3-odd", \
"radial5-odd", \
"radial7-odd", \
"radial9-odd", \
]
        # Lengths of the full polynomials (including distortion center, aspect, and asymmetry correction
        # for radial distortions)
self.distortion_type_poly_length = [
12, 13, 14, 7, 8, 9, 6, 7, 8, 9
]
# Set the length of the distortion polynomial depending on the distortion type
if distortion_type in self.distortion_type_list:
# If the new distortion type (poly vs radial) is different from the old, reset the parameters
if (distortion_type[:4] != self.distortion_type[:4]):
reset_params = True
# If the all vs odd only radial powers type is changed, reset the distortion
if distortion_type.startswith("radial"):
if (distortion_type[-3:] != self.distortion_type[-3:]):
reset_params = True
self.distortion_type = distortion_type
# Get the polynomial length
self.poly_length = self.distortion_type_poly_length[self.distortion_type_list.index(distortion_type)]
# Remove distortion center for radial distortions if it's not used
if distortion_type.startswith("radial"):
if self.force_distortion_centre:
self.poly_length -= 2
# Remove aspect parameter for radial distortions if it's not used
if distortion_type.startswith("radial"):
if self.equal_aspect:
self.poly_length -= 1
# Remove asymmetry correction parameters for radial distortions if they are not used
if distortion_type.startswith("radial"):
if not self.asymmetry_corr:
self.poly_length -= 2
else:
raise ValueError("The distortion type is not recognized: {:s}".format(self.distortion_type))
# Reset distortion parameters
if reset_params:
self.resetDistortionParameters()
# Set the correct polynomial size
self.padDictParams()
def addVignettingCoeff(self, use_flat):
""" Add a vignetting coeff to the platepar if it doesn't have one.
Arguments:
use_flat: [bool] Is the flat used or not.
"""
# Add a vignetting coefficient if it's not set
if self.vignetting_coeff is None:
# Only add it if a flat is not used
if use_flat:
self.vignetting_coeff = 0.0
else:
                # Use 0.001 rad/px as the default coefficient, as that's the one for 3.6 mm f/0.95 and 16 mm
# f/1.0 lenses. The vignetting coeff is dependent on the resolution, the default value of
# 0.001 rad/px is for 720p.
self.vignetting_coeff = 0.001*np.hypot(1280, 720)/np.hypot(self.X_res, self.Y_res)
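    # Worked example (not from the original code, values assumed): with the default
    # 0.001 rad/px defined at 1280x720, a 1920x1080 camera would get
    #   0.001*np.hypot(1280, 720)/np.hypot(1920, 1080) = 0.001*2/3 ~= 0.00067 rad/px,
    # i.e. the coefficient scales inversely with the image diagonal in pixels.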
def fitPointing(self, jd, img_stars, catalog_stars, fixed_scale=False):
""" Fit pointing parameters to the list of star image and celectial catalog coordinates.
At least 4 stars are needed to fit the rigid body parameters.
New parameters are saved to the given object (self).
Arguments:
jd: [float] Julian date of the image.
            img_stars: [list] A list of (x, y, intensity_sum) entries for every star.
catalog_stars: [list] A list of (ra, dec, mag) entries for every star (degrees).
Keyword arguments:
fixed_scale: [bool] Keep the scale fixed. False by default.
"""
def _calcImageResidualsAstro(params, platepar, jd, catalog_stars, img_stars):
""" Calculates the differences between the stars on the image and catalog stars in image
coordinates with the given astrometrical solution.
"""
# Extract fitting parameters
ra_ref, dec_ref, pos_angle_ref = params[:3]
if not fixed_scale:
F_scale = params[3]
img_x, img_y, _ = img_stars.T
pp_copy = copy.deepcopy(platepar)
# Assign guessed parameters
pp_copy.RA_d = ra_ref
pp_copy.dec_d = dec_ref
pp_copy.pos_angle_ref = pos_angle_ref
if not fixed_scale:
pp_copy.F_scale = abs(F_scale)
# Get image coordinates of catalog stars
catalog_x, catalog_y, catalog_mag = getCatalogStarsImagePositions(catalog_stars, jd, pp_copy)
# Calculate the sum of squared distances between image stars and catalog stars
dist_sum = np.sum((catalog_x - img_x)**2 + (catalog_y - img_y)**2)
return dist_sum
def _calcSkyResidualsAstro(params, platepar, jd, catalog_stars, img_stars):
""" Calculates the differences between the stars on the image and catalog stars in sky
coordinates with the given astrometrical solution.
"""
# Extract fitting parameters
ra_ref, dec_ref, pos_angle_ref = params[:3]
if not fixed_scale:
F_scale = params[3]
img_x, img_y, _ = img_stars.T
pp_copy = copy.deepcopy(platepar)
# Assign guessed parameters
pp_copy.RA_d = ra_ref
pp_copy.dec_d = dec_ref
pp_copy.pos_angle_ref = pos_angle_ref
if not fixed_scale:
pp_copy.F_scale = abs(F_scale)
img_x, img_y, _ = img_stars.T
# Get image coordinates of catalog stars
ra_array, dec_array = getPairedStarsSkyPositions(img_x, img_y, jd, pp_copy)
ra_catalog, dec_catalog, _ = catalog_stars.T
# Compute the sum of the angular separation
separation_sum = np.sum(angularSeparation(np.radians(ra_array), np.radians(dec_array), \
np.radians(ra_catalog), np.radians(dec_catalog))**2)
return separation_sum
# Initial parameters for the astrometric fit
p0 = [self.RA_d, self.dec_d, self.pos_angle_ref]
# Add fitting scale if not fixed
if not fixed_scale:
p0 += [abs(self.F_scale)]
# Fit the astrometric parameters using the reverse transform for reference
res = scipy.optimize.minimize(_calcImageResidualsAstro, p0, \
args=(self, jd, catalog_stars, img_stars), method='SLSQP')
# # Fit the astrometric parameters using the forward transform for reference
# WARNING: USING THIS MAKES THE FIT UNSTABLE
# res = scipy.optimize.minimize(_calcSkyResidualsAstro, p0, args=(self, jd, \
# catalog_stars, img_stars), method='Nelder-Mead')
# Update fitted astrometric parameters
self.RA_d, self.dec_d, self.pos_angle_ref = res.x[:3]
if not fixed_scale:
self.F_scale = res.x[3]
# Force scale to be positive
self.F_scale = abs(self.F_scale)
# Update alt/az of pointing
self.updateRefAltAz()
def fitAstrometry(self, jd, img_stars, catalog_stars, first_platepar_fit=False, fit_only_pointing=False,
fixed_scale=False):
""" Fit astrometric parameters to the list of star image and celectial catalog coordinates.
At least 4 stars are needed to fit the rigid body parameters.
New parameters are saved to the given object (self).
Arguments:
jd: [float] Julian date of the image.
            img_stars: [list] A list of (x, y, intensity_sum) entries for every star.
catalog_stars: [list] A list of (ra, dec, mag) entries for every star (degrees).
Keyword arguments:
first_platepar_fit: [bool] Fit a platepar from scratch. False by default.
fit_only_pointing: [bool] Only fit the pointing parameters, and not distortion.
fixed_scale: [bool] Keep the scale fixed. False by default.
"""
def _calcImageResidualsDistortion(params, platepar, jd, catalog_stars, img_stars, dimension):
""" Calculates the differences between the stars on the image and catalog stars in image
coordinates with the given astrometrical solution.
Arguments:
...
dimension: [str] 'x' for X polynomial fit, 'y' for Y polynomial fit
"""
# Set distortion parameters
pp_copy = copy.deepcopy(platepar)
if (dimension == 'x') or (dimension == 'radial'):
pp_copy.x_poly_rev = params
pp_copy.y_poly_rev = np.zeros(platepar.poly_length)
else:
pp_copy.x_poly_rev = np.zeros(platepar.poly_length)
pp_copy.y_poly_rev = params
img_x, img_y, _ = img_stars.T
# Get image coordinates of catalog stars
catalog_x, catalog_y, catalog_mag = getCatalogStarsImagePositions(catalog_stars, jd, pp_copy)
# Calculate the sum of squared distances between image stars and catalog stars, per every
# dimension
if dimension == 'x':
dist_sum = np.sum((catalog_x - img_x)**2)
elif dimension == 'y':
dist_sum = np.sum((catalog_y - img_y)**2)
# Minimization for the radial distortion
else:
# Compute the image fit error
dist_sum = np.sum((catalog_x - img_x)**2 + (catalog_y - img_y)**2)
return dist_sum
# Modify the residuals function so that it takes a list of arguments
def _calcImageResidualsDistortionListArguments(params, *args, **kwargs):
return [_calcImageResidualsDistortion(param_line, *args, **kwargs) for param_line in params]
def _calcSkyResidualsDistortion(params, platepar, jd, catalog_stars, img_stars, dimension):
""" Calculates the differences between the stars on the image and catalog stars in sky
coordinates with the given astrometrical solution.
Arguments:
...
dimension: [str] 'x' for X polynomial fit, 'y' for Y polynomial fit
"""
pp_copy = copy.deepcopy(platepar)
if (dimension == 'x') or (dimension == 'radial'):
pp_copy.x_poly_fwd = params
else:
pp_copy.y_poly_fwd = params
img_x, img_y, _ = img_stars.T
# Get image coordinates of catalog stars
ra_array, dec_array = getPairedStarsSkyPositions(img_x, img_y, jd, pp_copy)
ra_catalog, dec_catalog, _ = catalog_stars.T
# Compute the sum of the angular separation
separation_sum = np.sum(angularSeparation(np.radians(ra_array), np.radians(dec_array), \
np.radians(ra_catalog), np.radians(dec_catalog))**2)
return separation_sum
# Modify the residuals function so that it takes a list of arguments
def _calcSkyResidualsDistortionListArguments(params, *args, **kwargs):
return [_calcSkyResidualsDistortion(param_line, *args, **kwargs) for param_line in params]
def _calcImageResidualsAstroAndDistortionRadial(params, platepar, jd, catalog_stars, img_stars):
""" Calculates the differences between the stars on the image and catalog stars in image
                coordinates with the given astrometrical solution. Pointing and distortion parameters are used
in the fit.
Arguments:
...
dimension: [str] 'x' for X polynomial fit, 'y' for Y polynomial fit
"""
# Set distortion parameters
pp_copy = copy.deepcopy(platepar)
# Unpack pointing parameters and assign to the copy of platepar used for the fit
ra_ref, dec_ref, pos_angle_ref, F_scale = params[:4]
pp_copy = copy.deepcopy(platepar)
# Unnormalize the pointing parameters
pp_copy.RA_d = (360*ra_ref)%(360)
pp_copy.dec_d = -90 + (90*dec_ref + 90)%(180.000001)
pp_copy.pos_angle_ref = (360*pos_angle_ref)%(360)
pp_copy.F_scale = abs(F_scale)
# Assign distortion parameters
pp_copy.x_poly_rev = params[4:]
img_x, img_y, _ = img_stars.T
# Get image coordinates of catalog stars
catalog_x, catalog_y, catalog_mag = getCatalogStarsImagePositions(catalog_stars, jd, pp_copy)
# Calculate the sum of squared distances between image stars and catalog stars
dist_sum = np.sum((catalog_x - img_x)**2 + (catalog_y - img_y)**2)
return dist_sum
def _calcSkyResidualsAstroAndDistortionRadial(params, platepar, jd, catalog_stars, img_stars):
""" Calculates the differences between the stars on the image and catalog stars in celestial
                coordinates with the given astrometrical solution. Pointing and distortion parameters are used
in the fit.
"""
# Set distortion parameters
pp_copy = copy.deepcopy(platepar)
# Unpack pointing parameters and assign to the copy of platepar used for the fit
ra_ref, dec_ref, pos_angle_ref, F_scale = params[:4]
pp_copy = copy.deepcopy(platepar)
# Unnormalize the pointing parameters
pp_copy.RA_d = (360*ra_ref)%(360)
pp_copy.dec_d = -90 + (90*dec_ref + 90)%(180.000001)
pp_copy.pos_angle_ref = (360*pos_angle_ref)%(360)
pp_copy.F_scale = abs(F_scale)
# Assign distortion parameters
pp_copy.x_poly_fwd = params[4:]
img_x, img_y, _ = img_stars.T
# Get image coordinates of catalog stars
ra_array, dec_array = getPairedStarsSkyPositions(img_x, img_y, jd, pp_copy)
ra_catalog, dec_catalog, _ = catalog_stars.T
# Compute the sum of the angular separation
separation_sum = np.sum(angularSeparation(np.radians(ra_array), np.radians(dec_array), \
np.radians(ra_catalog), np.radians(dec_catalog))**2)
return separation_sum
# print('ASTRO', _calcImageResidualsAstro([self.RA_d, self.dec_d,
# self.pos_angle_ref, self.F_scale], catalog_stars, img_stars))
# print('DIS_X', _calcImageResidualsDistortion(self.x_poly_rev, catalog_stars, \
# img_stars, 'x'))
# print('DIS_Y', _calcImageResidualsDistortion(self.y_poly_rev, catalog_stars, \
# img_stars, 'y'))
### ASTROMETRIC PARAMETERS FIT ###
# Fit the pointing parameters (RA, Dec, rotation, scale)
# Only do the fit for the polynomial distortion model, or the first time if the radial distortion
# is used
if self.distortion_type.startswith("poly") \
or (not self.distortion_type.startswith("poly") and first_platepar_fit) or fit_only_pointing:
self.fitPointing(jd, img_stars, catalog_stars, fixed_scale=fixed_scale)
### ###
### DISTORTION FIT ###
# Fit the polynomial distortion parameters if there are enough picked stars
min_fit_stars = self.poly_length + 1
if (len(img_stars) >= min_fit_stars) and (not fit_only_pointing):
# Fit the polynomial distortion
if self.distortion_type.startswith("poly"):
### REVERSE MAPPING FIT ###
# Fit distortion parameters in X direction, reverse mapping
res = scipy.optimize.minimize(_calcImageResidualsDistortion, self.x_poly_rev, \
args=(self, jd, catalog_stars, img_stars, 'x'), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
                # Extract fitted X polynomial
self.x_poly_rev = res.x
# Fit distortion parameters in Y direction, reverse mapping
res = scipy.optimize.minimize(_calcImageResidualsDistortion, self.y_poly_rev, \
args=(self, jd, catalog_stars, img_stars, 'y'), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
# Extract fitted Y polynomial
self.y_poly_rev = res.x
### ###
                # If this is the first fit of the distortion, set the forward parameters to be equal to the reverse
if first_platepar_fit:
self.x_poly_fwd = np.array(self.x_poly_rev)
self.y_poly_fwd = np.array(self.y_poly_rev)
### FORWARD MAPPING FIT ###
# Fit distortion parameters in X direction, forward mapping
res = scipy.optimize.minimize(_calcSkyResidualsDistortion, self.x_poly_fwd, \
args=(self, jd, catalog_stars, img_stars, 'x'), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
# Extract fitted X polynomial
self.x_poly_fwd = res.x
# Fit distortion parameters in Y direction, forward mapping
res = scipy.optimize.minimize(_calcSkyResidualsDistortion, self.y_poly_fwd, \
args=(self, jd, catalog_stars, img_stars, 'y'), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
                # IMPORTANT NOTE - the X polynomial is used to store the fit parameters
self.y_poly_fwd = res.x
### ###
# Fit radial distortion (+ pointing)
else:
### FORWARD MAPPING FIT ###
# # Fit the radial distortion - the X polynomial is used to store the fit paramters
# res = scipy.optimize.minimize(_calcSkyResidualsDistortion, self.x_poly_fwd, \
# args=(self, jd, catalog_stars, img_stars, 'radial'), method='Nelder-Mead', \
# options={'maxiter': 10000, 'adaptive': True})
# # Extract distortion parameters, IMPORTANT NOTE - the X polynomial is used to store the
# # fit paramters
# self.x_poly_fwd = res.x
# Fitting the pointing direction below! - if used, it should be put BEFORE the reverse fit!
# Initial parameters for the pointing and distortion fit (normalize to the 0-1 range)
p0 = [self.RA_d/360, self.dec_d/90, self.pos_angle_ref/360, abs(self.F_scale)]
p0 += self.x_poly_fwd.tolist()
                # Fit the radial distortion - the X polynomial is used to store the fit parameters
res = scipy.optimize.minimize(_calcSkyResidualsAstroAndDistortionRadial, p0, \
args=(self, jd, catalog_stars, img_stars), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
# Update fitted astrometric parameters (Unnormalize the pointing parameters)
ra_ref, dec_ref, pos_angle_ref, F_scale = res.x[:4]
self.RA_d = (360*ra_ref)%(360)
self.dec_d = -90 + (90*dec_ref + 90)%(180.000001)
self.pos_angle_ref = (360*pos_angle_ref)%(360)
self.F_scale = abs(F_scale)
self.updateRefAltAz()
# Extract distortion parameters, IMPORTANT NOTE - the X polynomial is used to store the
# fit parameters
self.x_poly_fwd = res.x[4:]
### ###
# If this is the first fit of the distortion, set the reverse parameters to be equal to the forward
if first_platepar_fit:
self.x_poly_rev = np.array(self.x_poly_fwd)
### REVERSE MAPPING FIT ###
# # Initial parameters for the pointing and distortion fit (normalize to the 0-1 range)
# p0 = [self.RA_d/360.0, self.dec_d/90.0, self.pos_angle_ref/360.0, abs(self.F_scale)]
# p0 += self.x_poly_rev.tolist()
# # Fit the radial distortion - the X polynomial is used to store the fit parameters
# res = scipy.optimize.minimize(_calcImageResidualsAstroAndDistortionRadial, p0, \
# args=(self, jd, catalog_stars, img_stars), method='Nelder-Mead', \
# options={'maxiter': 10000, 'adaptive': True})
# # Update fitted astrometric parameters (Unnormalize the pointing parameters)
# ra_ref, dec_ref, pos_angle_ref, F_scale = res.x[:4]
# self.RA_d = (360*ra_ref)%(360)
# self.dec_d = -90 + (90*dec_ref + 90)%(180.000001)
# self.pos_angle_ref = (360*pos_angle_ref)%(360)
# self.F_scale = abs(F_scale)
# # Compute reference Alt/Az to apparent coordinates, epoch of date
# self.updateRefAltAz()
# # Extract distortion parameters, IMPORTANT NOTE - the X polynomial is used to store the
# # fit parameters
# self.x_poly_rev = res.x[4:]
## Distortion-only fit below!
# Fit the radial distortion - the X polynomial is used to store the fit parameters
res = scipy.optimize.minimize(_calcImageResidualsDistortion, self.x_poly_rev, \
args=(self, jd, catalog_stars, img_stars, 'radial'), method='Nelder-Mead', \
options={'maxiter': 10000, 'adaptive': True})
# Extract distortion parameters, IMPORTANT NOTE - the X polynomial is used to store the
# fit parameters
self.x_poly_rev = res.x
### ###
else:
if (len(img_stars) < min_fit_stars):
print('Too few stars to fit the distortion, only the astrometric parameters were fitted!')
# Set the list of stars used for the fit to the platepar
fit_star_list = []
for img_coords, cat_coords in zip(img_stars, catalog_stars):
# Store time, image coordinate x, y, intensity, catalog ra, dec, mag
fit_star_list.append([jd] + img_coords.tolist() + cat_coords.tolist())
self.star_list = fit_star_list
# Set the flag to indicate that the platepar was manually fitted
self.auto_check_fit_refined = False
self.auto_recalibrated = False
### ###
def parseLine(self, f):
""" Read next line, split the line and convert parameters to float.
@param f: [file handle] file we want to read
@return (a1, a2, ...): [list of floats] parsed data from the line
"""
return list(map(float, f.readline().split()))
def padDictParams(self):
""" Update the array length if an old platepar version was loaded which was shorter/longer. """
# Extend the array if it's too short
if self.x_poly_fwd.shape[0] < self.poly_length:
self.x_poly_fwd = np.pad(self.x_poly_fwd, (0, self.poly_length - self.x_poly_fwd.shape[0]), \
'constant', constant_values=0)
self.x_poly_rev = np.pad(self.x_poly_rev, (0, self.poly_length - self.x_poly_rev.shape[0]), \
'constant', constant_values=0)
self.y_poly_fwd = np.pad(self.y_poly_fwd, (0, self.poly_length - self.y_poly_fwd.shape[0]), \
'constant', constant_values=0)
self.y_poly_rev = np.pad(self.y_poly_rev, (0, self.poly_length - self.y_poly_rev.shape[0]), \
'constant', constant_values=0)
# Cut the array if it's too long
if self.x_poly_fwd.shape[0] > self.poly_length:
self.x_poly_fwd = self.x_poly_fwd[:self.poly_length]
self.x_poly_rev = self.x_poly_rev[:self.poly_length]
self.y_poly_fwd = self.y_poly_fwd[:self.poly_length]
self.y_poly_rev = self.y_poly_rev[:self.poly_length]
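# A minimal sketch of the padding/truncation behaviour above (illustrative values only):
# import numpy as np
# poly_length = 5
# coeffs = np.array([1.0, 2.0]) # old, shorter platepar
# np.pad(coeffs, (0, poly_length - coeffs.shape[0]), 'constant', constant_values=0)
# # -> array([1., 2., 0., 0., 0.])
# np.arange(7.0)[:poly_length] # a newer, longer platepar is simply cut
# # -> array([0., 1., 2., 3., 4.])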
def loadFromDict(self, platepar_dict, use_flat=None):
""" Load the platepar from a dictionary. """
# Parse JSON into an object with attributes corresponding to dict keys
self.__dict__ = platepar_dict
# Add the version if it was not in the platepar (v1 platepars didn't have a version)
if not 'version' in self.__dict__:
self.version = 1
# If the refraction was not used for the fit, assume it is disabled
if not 'refraction' in self.__dict__:
self.refraction = False
# If the measurement correction for refraction (if it was not taken into account during calibration)
# is not present, assume it's false
if not 'measurement_apparent_to_true_refraction' in self.__dict__:
self.measurement_apparent_to_true_refraction = False
# Add equal aspect
if not 'equal_aspect' in self.__dict__:
self.equal_aspect = False
# Add asymmetry correction
if not 'asymmetry_corr' in self.__dict__:
self.asymmetry_corr = False
# Add forcing distortion centre to image center
if not 'force_distortion_centre' in self.__dict__:
self.force_distortion_centre = False
# Add the distortion type if not present (assume it's the polynomial type with the radial term)
if not 'distortion_type' in self.__dict__:
self.distortion_type = "poly3+radial"
# Add UT correction if it was not in the platepar
if not 'UT_corr' in self.__dict__:
self.UT_corr = 0
# Add the gamma if it was not in the platepar
if not 'gamma' in self.__dict__:
self.gamma = 1.0
# Add the vignetting coefficient if it was not in the platepar
if not 'vignetting_coeff' in self.__dict__:
self.vignetting_coeff = None
# Add the default vignetting coeff
self.addVignettingCoeff(use_flat=use_flat)
# Add extinction scale
if not 'extinction_scale' in self.__dict__:
self.extinction_scale = 1.0
# Add the list of calibration stars if it was not in the platepar
if not 'star_list' in self.__dict__:
self.star_list = []
# In v1, only the backward distortion coeffs were fitted, so load them for both forward and
# reverse if nothing else is available
if not 'x_poly_fwd' in self.__dict__:
self.x_poly_fwd = np.array(self.x_poly)
self.x_poly_rev = np.array(self.x_poly)
self.y_poly_fwd = np.array(self.y_poly)
self.y_poly_rev = np.array(self.y_poly)
# Convert lists to numpy arrays
self.x_poly_fwd = np.array(self.x_poly_fwd)
self.x_poly_rev = np.array(self.x_poly_rev)
self.y_poly_fwd = np.array(self.y_poly_fwd)
self.y_poly_rev = np.array(self.y_poly_rev)
# Set the distortion type
self.setDistortionType(self.distortion_type, reset_params=False)
# Set polynomial parameters used by the old code
self.x_poly = self.x_poly_fwd
self.y_poly = self.y_poly_fwd
# Add rotation from horizontal
if not 'rotation_from_horiz' in self.__dict__:
self.rotation_from_horiz = RMS.Astrometry.ApplyAstrometry.rotationWrtHorizon(self)
# Calculate the datetime
self.time = jd2Date(self.JD, dt_obj=True)
def read(self, file_name, fmt=None, use_flat=None):
""" Read the platepar.
Arguments:
file_name: [str] Path and the name of the platepar to read.
Keyword arguments:
fmt: [str] Format of the platepar file. 'json' for JSON format and 'txt' for the usual CMN textual
format.
use_flat: [bool] Indicates whether a flat is used or not. None by default.
Return:
fmt: [str]
"""
# Check if platepar exists
if not os.path.isfile(file_name):
return False
# Determine the type of the platepar if it is not given
if fmt is None:
with open(file_name) as f:
data = " ".join(f.readlines())
# Try parsing the file as JSON
try:
json.loads(data)
fmt = 'json'
except:
fmt = 'txt'
# Load the file as JSON
if fmt == 'json':
# Load the JSON file
with open(file_name) as f:
data = " ".join(f.readlines())
# Load the platepar from the JSON dictionary
self.loadFromDict(json.loads(data), use_flat=use_flat)
# Load the file as TXT (old CMN format)
else:
with open(file_name) as f:
self.UT_corr = 0
self.gamma = 1.0
self.star_list = []
# Parse latitude, longitude, elevation
self.lon, self.lat, self.elev = self.parseLine(f)
# Parse date and time as int
D, M, Y, h, m, s = map(int, f.readline().split())
# Calculate the datetime of the platepar time
self.time = datetime.datetime(Y, M, D, h, m, s)
# Convert time to JD
self.JD = date2JD(Y, M, D, h, m, s)
# Calculate the reference hour angle
T = (self.JD - 2451545.0)/36525.0
self.Ho = (280.46061837 + 360.98564736629*(self.JD - 2451545.0) + 0.000387933*T**2 - \
T**3/38710000.0)%360
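# Sanity check of the sidereal time expression above: at the J2000.0 epoch (JD = 2451545.0)
# all the terms in T vanish and Ho = 280.46061837 deg, the standard GMST value at that epoch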
# Parse camera parameters
self.X_res, self.Y_res, self.focal_length = self.parseLine(f)
# Parse the right ascension of the image centre
self.RA_d, self.RA_H, self.RA_M, self.RA_S = self.parseLine(f)
# Parse the declination of the image centre
self.dec_d, self.dec_D, self.dec_M, self.dec_S = self.parseLine(f)
# Parse the rotation parameter
self.pos_angle_ref = self.parseLine(f)[0]
# Parse the image scale (convert from arcsec/px to px/deg)
self.F_scale = self.parseLine(f)[0]
self.F_scale = 3600/self.F_scale
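# Example of the scale conversion above: a plate scale of 36 arcsec/px read from the file
# corresponds to F_scale = 3600/36 = 100 px/deg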
# Load magnitude slope parameters
self.mag_0, self.mag_lev = self.parseLine(f)
# Load X axis polynomial parameters
self.x_poly_fwd = self.x_poly_rev = np.zeros(shape=(self.poly_length,), dtype=np.float64)
for i in range(self.poly_length):
self.x_poly_fwd[i] = self.x_poly_rev[i] = self.parseLine(f)[0]
# Load Y axis polynomial parameters
self.y_poly_fwd = self.y_poly_rev = np.zeros(shape=(self.poly_length,), dtype=np.float64)
for i in range(self.poly_length):
self.y_poly_fwd[i] = self.y_poly_rev[i] = self.parseLine(f)[0]
# Read station code
self.station_code = f.readline().replace('\r', '').replace('\n', '')
# Add a default vignetting coefficient if it already doesn't exist
self.addVignettingCoeff(use_flat)
return fmt
def jsonStr(self):
""" Returns the JSON representation of the platepar as a string. """
# Make a copy of the platepar object, which will be modified for writing
self2 = copy.deepcopy(self)
# Convert numpy arrays to list, which can be serialized
self2.x_poly_fwd = self.x_poly_fwd.tolist()
self2.x_poly_rev = self.x_poly_rev.tolist()
self2.y_poly_fwd = self.y_poly_fwd.tolist()
self2.y_poly_rev = self.y_poly_rev.tolist()
del self2.time
# For compatibility with old procedures, write the forward distortion parameters as x, y
self2.x_poly = self.x_poly_fwd.tolist()
self2.y_poly = self.y_poly_fwd.tolist()
out_str = json.dumps(self2, default=lambda o: o.__dict__, indent=4, sort_keys=True)
return out_str
def write(self, file_path, fmt=None, fov=None, ret_written=False):
""" Write platepar to file.
Arguments:
file_path: [str] Path and the name of the platepar to write.
Keyword arguments:
fmt: [str] Format of the platepar file. 'json' for JSON format and 'txt' for the usual CMN textual
format. The format is JSON by default.
fov: [tuple] Tuple of horizontal and vertical FOV size in degree. None by default.
ret_written: [bool] If True, the written platepar string is also returned together with the format.
Return:
fmt: [str] Platepar format.
"""
# If the FOV size was given, store it
if fov is not None:
self.fov_h, self.fov_v = fov
# Set JSON to be the default format
if fmt is None:
fmt = 'json'
# If the format is JSON, write a JSON file
if fmt == 'json':
out_str = self.jsonStr()
with open(file_path, 'w') as f:
f.write(out_str)
if ret_written:
return fmt, out_str
# Old CMN format
else:
with open(file_path, 'w') as f:
# Write geo coords
f.write('{:9.6f} {:9.6f} {:04d}\n'.format(self.lon, self.lat, int(self.elev)))
# Calculate reference time from reference JD
Y, M, D, h, m, s, ms = list(map(int, jd2Date(self.JD)))
# Write the reference time
f.write('{:02d} {:02d} {:04d} {:02d} {:02d} {:02d}\n'.format(D, M, Y, h, m, s))
# Write resolution and focal length
f.write('{:d} {:d} {:f}\n'.format(int(self.X_res), int(self.Y_res), self.focal_length))
# Write reference RA
self.RA_H = int(self.RA_d/15)
self.RA_M = int((self.RA_d/15 - self.RA_H)*60)
self.RA_S = int(((self.RA_d/15 - self.RA_H)*60 - self.RA_M)*60)
f.write("{:7.3f} {:02d} {:02d} {:02d}\n".format(self.RA_d, self.RA_H, self.RA_M, self.RA_S))
# Write reference Dec
self.dec_D = int(self.dec_d)
self.dec_M = int((self.dec_d - self.dec_D)*60)
self.dec_S = int(((self.dec_d - self.dec_D)*60 - self.dec_M)*60)
f.write("{:+7.3f} {:02d} {:02d} {:02d}\n".format(self.dec_d, self.dec_D, self.dec_M, self.dec_S))
# Write rotation parameter
f.write('{:<7.3f}\n'.format(self.pos_angle_ref))
# Write F scale
f.write('{:<5.1f}\n'.format(3600/self.F_scale))
# Write magnitude fit
f.write("{:.3f} {:.3f}\n".format(self.mag_0, self.mag_lev))
# Write X distortion polynomial
for x_elem in self.x_poly_fwd:
f.write('{:+E}\n'.format(x_elem))
# Write y distortion polynomial
for y_elem in self.y_poly_fwd:
f.write('{:+E}\n'.format(y_elem))
# Write station code
f.write(str(self.station_code) + '\n')
if ret_written:
with open(file_path) as f:
out_str = "\n".join(f.readlines())
return fmt, out_str
return fmt
def updateRefAltAz(self):
""" Update the reference apparent azimuth and altitude from the reference RA and Dec. """
# Compute reference Alt/Az to apparent coordinates, epoch of date
az_centre, alt_centre = cyTrueRaDec2ApparentAltAz( \
np.radians(self.RA_d), np.radians(self.dec_d), self.JD, \
np.radians(self.lat), np.radians(self.lon), self.refraction)
self.az_centre, self.alt_centre = np.degrees(az_centre), np.degrees(alt_centre)
# Update the rotation wrt horizon
self.rotation_from_horiz = RMS.Astrometry.ApplyAstrometry.rotationWrtHorizon(self)
def updateRefRADec(self, skip_rot_update=False, preserve_rotation=False):
""" Update the reference RA and Dec (true in J2000) from Alt/Az (apparent in epoch of date). """
if (not skip_rot_update) and (not preserve_rotation):
# Save the current rotation w.r.t horizon value
self.rotation_from_horiz = RMS.Astrometry.ApplyAstrometry.rotationWrtHorizon(self)
# Convert the reference apparent Alt/Az in the epoch of date to true RA/Dec in J2000
ra, dec = cyApparentAltAz2TrueRADec(\
np.radians(self.az_centre), np.radians(self.alt_centre), self.JD, \
np.radians(self.lat), np.radians(self.lon), self.refraction)
# Assign the computed RA/Dec to platepar
self.RA_d = np.degrees(ra)
self.dec_d = np.degrees(dec)
if not skip_rot_update:
# Update the position angle so that the rotation wrt horizon doesn't change
self.pos_angle_ref = RMS.Astrometry.ApplyAstrometry.rotationWrtHorizonToPosAngle(self, \
self.rotation_from_horiz)
def switchToGroundPicks(self):
""" Switch the reference pointing so that points on the ground may be correctly measured. """
# If the refraction was on, turn it off and correct the centre
if self.refraction:
self.refraction = False
# Preserve the reference elevation of the pointing as the apparent pointing
# self.alt_centre = np.degrees(pyRefractionTrueToApparent(np.radians(self.alt_centre)))
self.updateRefRADec(preserve_rotation=True)
def __repr__(self):
# Compute alt/az pointing
azim, elev = trueRaDec2ApparentAltAz(self.RA_d, self.dec_d, self.JD, self.lat, self.lon, \
refraction=self.refraction)
out_str = "Platepar\n"
out_str += "--------\n"
out_str += "Camera info:\n"
out_str += " Lat (+N) = {:+11.6f} deg\n".format(self.lat)
out_str += " Lon (+E) = {:+11.6f} deg\n".format(self.lon)
out_str += " Ele (MSL) = {:11.2f} m\n".format(self.elev)
out_str += " FOV = {:6.2f} x {:6.2f} deg\n".format(*RMS.Astrometry.ApplyAstrometry.computeFOVSize(self))
out_str += " Img res = {:6d} x {:6d} px\n".format(self.X_res, self.Y_res)
out_str += "Reference pointing - equatorial (J2000):\n"
out_str += " JD = {:.10f} \n".format(self.JD)
out_str += " RA = {:11.6f} deg\n".format(self.RA_d)
out_str += " Dec = {:+11.6f} deg\n".format(self.dec_d)
out_str += " Pos ang = {:.6f} deg\n".format(self.pos_angle_ref)
out_str += " Pix scl = {:.2f} arcmin/px\n".format(60/self.F_scale)
out_str += "Reference pointing - apparent azimuthal (date):\n"
out_str += " Azim = {:.6f} deg (+E of N)\n".format(azim)
out_str += " Alt = {:.6f} deg\n".format(elev)
out_str += " Rot/hor = {:.6f} deg\n".format(RMS.Astrometry.ApplyAstrometry.rotationWrtHorizon(self))
out_str += "Distortion:\n"
out_str += " Type = {:s}\n".format(self.distortion_type)
# If the polynomial is used, the X axis parameters are stored in x_poly, otherwise the radial
# parameters are used
if self.distortion_type.startswith("poly"):
out_str += " Distortion coeffs (polynomial):\n"
dist_string = "X"
# Poly parameters for printing (needed for radial which will be modified)
x_poly_fwd_print = self.x_poly_fwd
x_poly_rev_print = self.x_poly_rev
# Radial coefficients
else:
out_str += " Distortion coeffs (radial):\n"
out_str += " "
if not self.force_distortion_centre:
out_str += " x0 (px), y0 (px), "
if not self.equal_aspect:
out_str += "aspect-1, "
if self.asymmetry_corr:
out_str += " a1, a2 (deg), "
out_str +=" k1, k2, k3, k4\n"
dist_string = ""
x_poly_fwd_print = np.array(self.x_poly_fwd)
x_poly_rev_print = np.array(self.x_poly_rev)
if not self.force_distortion_centre:
# Report x0 and y0 in px (unnormalize and wrap)
x_poly_fwd_print[0] *= self.X_res/2
x_poly_fwd_print[1] *= self.Y_res/2
x_poly_rev_print[0] *= self.X_res/2
x_poly_rev_print[1] *= self.Y_res/2
x_poly_fwd_print[0] = -self.X_res/2.0 + (x_poly_fwd_print[0] + self.X_res/2.0)%self.X_res
x_poly_fwd_print[1] = -self.Y_res/2.0 + (x_poly_fwd_print[1] + self.Y_res/2.0)%self.Y_res
x_poly_rev_print[0] = -self.X_res/2.0 + (x_poly_rev_print[0] + self.X_res/2.0)%self.X_res
x_poly_rev_print[1] = -self.Y_res/2.0 + (x_poly_rev_print[1] + self.Y_res/2.0)%self.Y_res
# Convert the asymmetry correction parameter to degrees
if self.asymmetry_corr:
asym_ang_index = 4
if self.force_distortion_centre:
asym_ang_index -= 2
if self.equal_aspect:
asym_ang_index -= 1
x_poly_fwd_print[asym_ang_index] = np.degrees((2*np.pi*x_poly_fwd_print[asym_ang_index]) \
%(2*np.pi))
x_poly_rev_print[asym_ang_index] = np.degrees((2*np.pi*x_poly_rev_print[asym_ang_index]) \
%(2*np.pi))
out_str += "img2sky {:s} = {:s}\n".format(dist_string, ", ".join(["{:+8.3f}".format(c) \
if abs(c) > 10e-4 else "{:+8.1e}".format(c) for c in x_poly_fwd_print]))
out_str += "sky2img {:s} = {:s}\n".format(dist_string, ", ".join(["{:+8.3f}".format(c) \
if abs(c) > 10e-4 else "{:+8.1e}".format(c) for c in x_poly_rev_print]))
# Only print the rest if the polynomial fit is used
if self.distortion_type.startswith("poly"):
out_str += "img2sky Y = {:s}\n".format(", ".join(["{:+8.3f}".format(c) \
if abs(c) > 10e-4 else "{:+8.1e}".format(c) for c in self.y_poly_fwd]))
out_str += "sky2img Y = {:s}\n".format(", ".join(["{:+8.3f}".format(c) \
if abs(c) > 10e-4 else "{:+8.1e}".format(c) for c in self.y_poly_rev]))
return out_str
if __name__ == "__main__":
import argparse
# Init argument parser
arg_parser = argparse.ArgumentParser(description="Test the astrometry functions using the given platepar.")
arg_parser.add_argument('platepar_path', metavar='PLATEPAR', type=str, \
help='Path to the platepar file')
# Parse the command line arguments
cml_args = arg_parser.parse_args()
# Load the platepar file
pp = Platepar()
pp.read(cml_args.platepar_path)
# Try with using standard coordinates by resetting the distortion coeffs
pp.resetDistortionParameters()
# # Reset distortion fit (forward and reverse)
# pp.x_poly_fwd = np.zeros(shape=(12,), dtype=np.float64)
# pp.y_poly_fwd = np.zeros(shape=(12,), dtype=np.float64)
# pp.x_poly_rev = np.zeros(shape=(12,), dtype=np.float64)
# pp.y_poly_rev = np.zeros(shape=(12,), dtype=np.float64)
print(pp)
# Try forward and reverse mapping, and compare results
for i in range(5):
# Randomly generate a pick inside the image
x_img = np.random.uniform(0, pp.X_res)
y_img = np.random.uniform(0, pp.Y_res)
# Take current time
time_data = [2020, 5, 30, 1, 20, 34, 567]
# Map to RA/Dec
jd_data, ra_data, dec_data, _ = RMS.Astrometry.ApplyAstrometry.xyToRaDecPP([time_data], [x_img], \
[y_img], [1], pp, extinction_correction=False)
# Map back to X, Y
x_data, y_data = RMS.Astrometry.ApplyAstrometry.raDecToXYPP(ra_data, dec_data, jd_data[0], pp)
# Map forward to sky again
_, ra_data_rev, dec_data_rev, _ = RMS.Astrometry.ApplyAstrometry.xyToRaDecPP([time_data], x_data, \
y_data, [1], pp, extinction_correction=False)
print()
print("-----------------------")
print("Init image coordinates:")
print("X = {:.3f}".format(x_img))
print("Y = {:.3f}".format(y_img))
print("Sky coordinates:")
print("RA = {:.4f}".format(ra_data[0]))
print("Dec = {:+.4f}".format(dec_data[0]))
print("Reverse image coordinates:")
print("X = {:.3f}".format(x_data[0]))
print("Y = {:.3f}".format(y_data[0]))
print("Reverse sky coordinates:")
print("RA = {:.4f}".format(ra_data_rev[0]))
print("Dec = {:+.4f}".format(dec_data_rev[0]))
print("Image diff:")
print("X = {:.3f}".format(x_img - x_data[0]))
print("Y = {:.3f}".format(y_img - y_data[0]))
print("Sky diff:")
print("RA = {:.4f} amin".format(60*(ra_data[0] - ra_data_rev[0])))
print("Dec = {:+.4f} amin".format(60*(dec_data[0] - dec_data_rev[0])))
|
gpl-3.0
|
djordon/queueing-tool
|
queueing_tool/network/queue_network.py
|
1
|
60893
|
import collections
import numbers
import copy
import array
import numpy as np
from numpy.random import uniform
try:
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.collections import LineCollection
plt.style.use('ggplot')
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from queueing_tool.graph import _prepare_graph
from queueing_tool.queues import (
NullQueue,
QueueServer,
LossQueue
)
from queueing_tool.network.priority_queue import PriorityQueue
class QueueingToolError(Exception):
"""Base class for exceptions in Queueing-tool."""
pass
EPS = np.float64(1e-7)
v_pens = [
[0.5, 0.5, 0.5, 0.5],
[0, 0.5, 1, 1],
[0.133, 0.545, 0.133, 1],
[0.282, 0.239, 0.545, 1],
[1, 0.135, 0, 1]
]
class QueueNetwork(object):
"""A class that simulates a network of queues.
Takes a networkx :any:`DiGraph<networkx.DiGraph>` and places queues
on each edge of the graph. The simulations are event based, and
this class handles the scheduling of events.
Each edge on the graph has a *type*, and this *type* is used to
specify the subclass of :class:`.QueueServer` and arguments used
when creating the queue that sits on each edge.
Parameters
----------
g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \
``None``, etc.
Any object that networkx can turn into a
:any:`DiGraph<networkx.DiGraph>`. The graph specifies the
network, and the queues sit on top of the edges.
q_classes : dict (optional)
Used to specify the :class:`.QueueServer` class for each edge
type. The keys are integers for the edge types, and the values
are classes.
q_args : dict (optional)
Used to specify the class arguments for each type of
:class:`.QueueServer`. The keys are integers for the edge types
and the values are dictionaries holding the arguments that are
passed when instantiating each :class:`.QueueServer` created
with that edge type.
seed : int (optional)
An integer used to initialize numpy's pseudo-random number
generator.
colors : dict (optional)
A dictionary of RGBA colors used to color the graph. The keys
are specified in the Notes section. If this parameter is
supplied but a particular key is missing, then the default
value for that key is used.
max_agents : int (optional, default: 1000)
The maximum number of agents that can be in the network at any
time.
blocking : ``{'BAS', 'RS'}`` (optional, default: ``'BAS'``)
Specifies the blocking behavior for the system. If ``blocking``
is not ``'RS'``, then it is assumed to be ``'BAS'``.
``'BAS'``
*Blocking After Service*: when an agent attempts to enter a
:class:`.LossQueue` that is at capacity the agent is forced
to wait at his current queue until an agent departs from
the target queue.
``'RS'``
*Repetitive Service Blocking*: when an agent attempts to
enter a :class:`.LossQueue` that is at capacity, the agent
is forced to receive another service from the queue it is
departing from. After the agent receives the service, she
then checks to see if the desired queue is still at
capacity, and if it is this process is repeated, otherwise
she enters the queue.
adjust_graph : bool (optional, default: ``True``)
Specifies whether the graph will be adjusted to make sure
terminal nodes do not cause any issues when simulating. For
most cases, this should be set to ``True``.
Attributes
----------
blocking : str
Specifies whether the system's blocking behavior is either
Blocking After Service (BAS) or Repetitive Service Blocking (RS).
colors : dict
A dictionary of colors used when drawing a graph. See the notes
for the defaults.
current_time : float
The time of the last event.
edge2queue : list
A list of queues where the ``edge2queue[k]`` returns the queue
on the edge with edge index ``k``.
g : :class:`.QueueNetworkDiGraph`
The graph for the network.
default_classes : dict
Specifies the default queue classes for each edge type.
default_colors : dict
Specifies various default colors.
default_q_colors : dict
Specifies the default colors used by the queues.
in_edges : list
A mapping between vertex indices and the in-edges at that
vertex. Specifically, ``in_edges[v]`` returns a list containing
the edge index for all edges with the head of the edge at
``v``, where ``v`` is the vertex's index number.
max_agents : int
The maximum number of agents that can be in the network at any
time.
num_agents : :class:`~numpy.ndarray`
A one-dimensional array where the ``k``\ th entry corresponds to
the total number of agents in the :class:`.QueueServer` with
edge index ``k``. This number includes agents that are
scheduled to arrive at the queue at some future time but
haven't yet.
num_edges : int
The number of edges in the graph.
num_events : int
The number of events that have occurred thus far. Every arrival
from outside the network counts as one event, but the departure
of an agent from a queue and the arrival of that same agent to
another queue counts as one event.
num_vertices : int
The number of vertices in the graph.
num_nodes : int
The number of vertices in the graph.
out_edges : list
A mapping between vertex indices and the out-edges at that
vertex. Specifically, ``out_edges[v]`` returns a list
containing the edge index for all edges with the tail of the
edge at ``v``, where ``v`` is the vertex's index number.
time : float
The time of the next event.
Raises
------
TypeError
Raised when the parameter ``g`` is not ``None`` and is not of a
type that can be made into a :any:`networkx.DiGraph`.
Notes
-----
* If only :class:`Agents<.Agent>` enter the network, then the
``QueueNetwork`` instance is a `Jackson network`_. The default
transition probabilities at any vertex ``v`` is
``1 / g.out_degree(v)`` for each adjacent vertex.
* This class must be initialized before any simulations can take
place. To initialize, call the :meth:`~initialize` method.
* When simulating the network, the departure of an agent from one
queue coincides with their arrival to another. There is no time
lag between these events.
* When defining your ``q_classes`` you should not assign queues
with edge type ``0`` to anything other than the
:class:`.NullQueue` class. Edges with edge type ``0`` are
treated by ``QueueNetwork`` as terminal edges (edges that point
to a terminal vertex).
* If an edge type is used in your network but not given in
``q_classes`` parameter then the defaults are used, which are:
>>> default_classes = { # doctest: +SKIP
... 0: qt.NullQueue,
... 1: qt.QueueServer,
... 2: qt.LossQueue,
... 3: qt.LossQueue,
... 4: qt.LossQueue
... }
For example, if your network has type ``0``, ``1``, and ``2``
edges but your ``q_classes`` parameter looks like:
>>> my_classes = {1 : qt.ResourceQueue} # doctest: +SKIP
then each type ``0`` and type ``2`` edge is a :class:`.NullQueue`
and :class:`.LossQueue` respectively.
* The following properties are assigned as a node or edge attribute
to the graph; their default values for each edge or node are shown:
* ``vertex_pen_width``: ``1.1``,
* ``vertex_size``: ``8``,
* ``edge_control_points``: ``[]``
* ``edge_marker_size``: ``8``
* ``edge_pen_width``: ``1.25``
There are also property maps created for graph visualization,
they are ``vertex_color``, ``vertex_fill_color``, ``pos``, and
``edge_color``. The default colors, which are used by various
methods, are:
>>> default_colors = {
... 'vertex_fill_color': [0.9, 0.9, 0.9, 1.0],
... 'vertex_color': [0.0, 0.5, 1.0, 1.0],
... 'vertex_highlight': [0.5, 0.5, 0.5, 1.0],
... 'edge_departure': [0, 0, 0, 1],
... 'vertex_active': [0.1, 1.0, 0.5, 1.0],
... 'vertex_inactive': [0.9, 0.9, 0.9, 0.8],
... 'edge_active': [0.1, 0.1, 0.1, 1.0],
... 'edge_inactive': [0.8, 0.8, 0.8, 0.3],
... 'bgcolor': [1, 1, 1, 1]
... }
.. _Jackson network: http://en.wikipedia.org/wiki/Jackson_network
Examples
--------
The following creates a queueing network with the Moebius-Kantor
graph. Each queue has 5 servers, and the same arrival and service
distributions:
>>> import queueing_tool as qt
>>> import networkx as nx
>>> import numpy as np
>>>
>>> g = nx.moebius_kantor_graph()
>>> q_cl = {1: qt.QueueServer}
>>> def arr(t):
... return t + np.random.gamma(4, 0.0025)
>>> def ser(t):
... return t + np.random.exponential(0.025)
>>> q_ar = {
... 1: {
... 'arrival_f': arr,
... 'service_f': ser,
... 'num_servers': 5
... }
... }
>>> net = qt.QueueNetwork(g, q_classes=q_cl, q_args=q_ar, seed=13)
To specify that arrivals enter from type 1 edges and simulate run:
>>> net.initialize(edge_type=1)
>>> net.simulate(n=100)
Now we'd like to see how many agents are in type 1 edges:
>>> nA = [(q.num_system, q.edge[2]) for q in net.edge2queue if q.edge[3] == 1]
>>> nA.sort(reverse=True)
>>> nA[:5] # doctest: +SKIP
[(4, 37), (4, 34), (3, 43), (3, 32), (3, 30)]
To view the state of the network do the following (note, you need
to have pygraphviz installed and your graph may be rotated):
>>> net.simulate(n=500)
>>> pos = nx.nx_agraph.graphviz_layout(g.to_undirected(), prog='neato') # doctest: +SKIP
>>> net.draw(pos=pos) # doctest: +SKIP
<...>
.. figure:: my_network1.png
:align: center
"""
default_colors = {
'vertex_fill_color': [0.95, 0.95, 0.95, 1.0],
'vertex_color': [0.0, 0.5, 1.0, 1.0],
'vertex_highlight': [0.5, 0.5, 0.5, 1.0],
'edge_departure': [0, 0, 0, 1],
'vertex_active': [0.1, 1.0, 0.5, 1.0],
'vertex_inactive': [0.95, 0.95, 0.95, 1.0],
'edge_active': [0.1, 0.1, 0.1, 1.0],
'edge_inactive': [0.8, 0.8, 0.8, 0.3],
'bgcolor': [1, 1, 1, 1]
}
default_classes = {
0: NullQueue,
1: QueueServer,
2: LossQueue,
3: LossQueue,
4: LossQueue
}
default_q_colors = {
k: {'edge_loop_color': [0, 0, 0, 0],
'edge_color': [0.8, 0.8, 0.8, 1.0],
'vertex_fill_color': [0.95, 0.95, 0.95, 1.0],
'vertex_color': v_pens[k]}
for k in range(5)
}
def __init__(self, g, q_classes=None, q_args=None, seed=None, colors=None,
max_agents=1000, blocking='BAS', adjust_graph=True):
if not isinstance(blocking, str):
raise TypeError("blocking must be a string")
self._t = 0
self.num_events = 0
self.max_agents = max_agents
self._initialized = False
self._prev_edge = None
self._fancy_heap = PriorityQueue()
self._blocking = True if blocking.lower() != 'rs' else False
if colors is None:
colors = {}
for key, value in self.default_colors.items():
if key not in colors:
colors[key] = value
self.colors = colors
if q_classes is None:
q_classes = self.default_classes
else:
for k in set(self.default_classes.keys()) - set(q_classes.keys()):
q_classes[k] = self.default_classes[k]
if q_args is None:
q_args = {k: {} for k in range(5)}
else:
for k in set(q_classes.keys()) - set(q_args.keys()):
q_args[k] = {}
for key, args in q_args.items():
if 'colors' not in args:
args['colors'] = self.default_q_colors.get(key, self.default_q_colors[1])
if isinstance(seed, numbers.Integral):
np.random.seed(seed)
if g is not None:
g, qs = _prepare_graph(g, self.colors, q_classes, q_args, adjust_graph)
self.nV = g.number_of_nodes()
self.nE = g.number_of_edges()
self.edge2queue = qs
self.num_agents = np.zeros(g.number_of_edges(), int)
self.out_edges = [0 for v in range(self.nV)]
self.in_edges = [0 for v in range(self.nV)]
self._route_probs = [0 for v in range(self.nV)]
for v in g.nodes():
vod = g.out_degree(v)
probs = array.array('d', [1. / vod for i in range(vod)])
self.out_edges[v] = [g.edge_index[e] for e in sorted(g.out_edges(v))]
self.in_edges[v] = [g.edge_index[e] for e in sorted(g.in_edges(v))]
self._route_probs[v] = probs
g.freeze()
self.g = g
def __repr__(self):
the_string = 'QueueNetwork. # nodes: {0}, edges: {1}, agents: {2}'
return the_string.format(self.nV, self.nE, np.sum(self.num_agents))
@property
def blocking(self):
return 'BAS' if self._blocking else 'RS'
@blocking.setter
def blocking(self, tmp):
if not isinstance(tmp, str):
raise TypeError("blocking must be a string")
self._blocking = True if tmp.lower() != 'rs' else False
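# Usage sketch for the property above (illustrative, assuming 'net' is an existing
# QueueNetwork): the blocking discipline can be switched after construction, and reading it
# back reports either 'BAS' or 'RS'.
# net.blocking = 'RS' # switch to Repetitive Service blocking
# net.blocking # -> 'RS'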
@property
def num_vertices(self):
return self.nV
@property
def num_nodes(self):
return self.nV
@property
def num_edges(self):
return self.nE
@property
def current_time(self):
return self._t
@property
def time(self):
if self._fancy_heap.size > 0:
e = self._fancy_heap.array_edges[0]
t = self.edge2queue[e]._time
else:
t = np.infty
return t
def animate(self, out=None, t=None, line_kwargs=None,
scatter_kwargs=None, **kwargs):
"""Animates the network as it's simulating.
The animations can be saved to disk or viewed in interactive
mode. Closing the window ends the animation if viewed in
interactive mode. This method calls
:meth:`~matplotlib.axes.Axes.scatter` and
:class:`~matplotlib.collections.LineCollection`, and any
keyword arguments they accept can be passed to them.
Parameters
----------
out : str (optional)
The location where the frames for the images will be saved.
If this parameter is not given, then the animation is shown
in interactive mode.
t : float (optional)
The amount of simulation time to simulate forward. If
given, and ``out`` is given, ``t`` is used instead of
``n``.
line_kwargs : dict (optional, default: None)
Any keyword arguments accepted by
:class:`~matplotlib.collections.LineCollection`.
scatter_kwargs : dict (optional, default: None)
Any keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.scatter`.
bgcolor : list (optional, keyword only)
A list with 4 floats representing a RGBA color. The
default is defined in ``self.colors['bgcolor']``.
figsize : tuple (optional, keyword only, default: ``(7, 7)``)
The width and height of the figure in inches.
**kwargs :
This method calls
:class:`~matplotlib.animation.FuncAnimation` and
optionally :meth:`.matplotlib.animation.FuncAnimation.save`.
Any keyword that can be passed to these functions are
passed via ``kwargs``.
Notes
-----
There are several parameters automatically set and passed to
matplotlib's :meth:`~matplotlib.axes.Axes.scatter`,
:class:`~matplotlib.collections.LineCollection`, and
:class:`~matplotlib.animation.FuncAnimation` by default.
These include:
* :class:`~matplotlib.animation.FuncAnimation`: Uses the
defaults for that function. Saving the animation is done
by passing the 'filename' keyword argument to this method.
This method also accepts any keyword arguments accepted
by :meth:`~matplotlib.animation.FuncAnimation.save`.
* :class:`~matplotlib.collections.LineCollection`: The default
arguments are taken from
:meth:`.QueueNetworkDiGraph.lines_scatter_args`.
* :meth:`~matplotlib.axes.Axes.scatter`: The default
arguments are taken from
:meth:`.QueueNetworkDiGraph.lines_scatter_args`.
Raises
------
QueueingToolError
Will raise a :exc:`.QueueingToolError` if the
``QueueNetwork`` has not been initialized. Call
:meth:`.initialize` before running.
Examples
--------
This function works similarly to ``QueueNetwork's``
:meth:`.draw` method.
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> net.initialize()
>>> net.animate(figsize=(4, 4)) # doctest: +SKIP
To stop the animation just close the window. If you want to
write the animation to disk run something like the following:
>>> kwargs = {
... 'filename': 'test.mp4',
... 'frames': 300,
... 'fps': 30,
... 'writer': 'mencoder',
... 'figsize': (4, 4),
... 'vertex_size': 15
... }
>>> net.animate(**kwargs) # doctest: +SKIP
"""
if not self._initialized:
msg = ("Network has not been initialized. "
"Call '.initialize()' first.")
raise QueueingToolError(msg)
if not HAS_MATPLOTLIB:
msg = "Matplotlib is necessary to animate a simulation."
raise ImportError(msg)
self._update_all_colors()
kwargs.setdefault('bgcolor', self.colors['bgcolor'])
fig = plt.figure(figsize=kwargs.get('figsize', (7, 7)))
ax = fig.gca()
mpl_kwargs = {
'line_kwargs': line_kwargs,
'scatter_kwargs': scatter_kwargs,
'pos': kwargs.get('pos')
}
line_args, scat_args = self.g.lines_scatter_args(**mpl_kwargs)
lines = LineCollection(**line_args)
lines = ax.add_collection(lines)
scatt = ax.scatter(**scat_args)
t = np.infty if t is None else t
now = self._t
def update(frame_number):
if t is not None:
if self._t > now + t:
return False
self._simulate_next_event(slow=True)
lines.set_color(line_args['colors'])
scatt.set_edgecolors(scat_args['edgecolors'])
scatt.set_facecolor(scat_args['c'])
if hasattr(ax, 'set_facecolor'):
ax.set_facecolor(kwargs['bgcolor'])
else:
ax.set_axis_bgcolor(kwargs['bgcolor'])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
animation_args = {
'fargs': None,
'event_source': None,
'init_func': None,
'frames': None,
'blit': False,
'interval': 10,
'repeat': None,
'func': update,
'repeat_delay': None,
'fig': fig,
'save_count': None,
}
for key, value in kwargs.items():
if key in animation_args:
animation_args[key] = value
animation = FuncAnimation(**animation_args)
if 'filename' not in kwargs:
plt.ioff()
plt.show()
else:
save_args = {
'filename': None,
'writer': None,
'fps': None,
'dpi': None,
'codec': None,
'bitrate': None,
'extra_args': None,
'metadata': None,
'extra_anim': None,
'savefig_kwargs': None
}
for key, value in kwargs.items():
if key in save_args:
save_args[key] = value
animation.save(**save_args)
def clear(self):
"""Resets the queue to its initial state.
The attributes ``t``, ``num_events``, ``num_agents`` are set to
zero, :meth:`.reset_colors` is called, and the
:meth:`.QueueServer.clear` method is called for each queue in
the network.
Notes
-----
``QueueNetwork`` must be re-initialized before any simulations
can run.
"""
self._t = 0
self.num_events = 0
self.num_agents = np.zeros(self.nE, int)
self._fancy_heap = PriorityQueue()
self._prev_edge = None
self._initialized = False
self.reset_colors()
for q in self.edge2queue:
q.clear()
def clear_data(self, queues=None, edge=None, edge_type=None):
"""Clears data from all queues.
If none of the parameters are given then every queue's data is
cleared.
Parameters
----------
queues : int or an iterable of int (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be cleared.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues' data to clear. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types will have their data cleared.
"""
queues = _get_queues(self.g, queues, edge, edge_type)
for k in queues:
self.edge2queue[k].data = {}
def copy(self):
"""Returns a deep copy of itself."""
net = QueueNetwork(None)
net.g = self.g.copy()
net.max_agents = copy.deepcopy(self.max_agents)
net.nV = copy.deepcopy(self.nV)
net.nE = copy.deepcopy(self.nE)
net.num_agents = copy.deepcopy(self.num_agents)
net.num_events = copy.deepcopy(self.num_events)
net._t = copy.deepcopy(self._t)
net._initialized = copy.deepcopy(self._initialized)
net._prev_edge = copy.deepcopy(self._prev_edge)
net._blocking = copy.deepcopy(self._blocking)
net.colors = copy.deepcopy(self.colors)
net.out_edges = copy.deepcopy(self.out_edges)
net.in_edges = copy.deepcopy(self.in_edges)
net.edge2queue = copy.deepcopy(self.edge2queue)
net._route_probs = copy.deepcopy(self._route_probs)
if net._initialized:
keys = [q._key() for q in net.edge2queue if q._time < np.infty]
net._fancy_heap = PriorityQueue(keys, net.nE)
return net
def draw(self, update_colors=True, line_kwargs=None,
scatter_kwargs=None, **kwargs):
"""Draws the network. The coloring of the network corresponds
to the number of agents at each queue.
Parameters
----------
update_colors : ``bool`` (optional, default: ``True``).
Specifies whether all the colors are updated.
line_kwargs : dict (optional, default: None)
Any keyword arguments accepted by
:class:`~matplotlib.collections.LineCollection`
scatter_kwargs : dict (optional, default: None)
Any keyword arguments accepted by
:meth:`~matplotlib.axes.Axes.scatter`.
bgcolor : list (optional, keyword only)
A list with 4 floats representing a RGBA color. The
default is defined in ``self.colors['bgcolor']``.
figsize : tuple (optional, keyword only, default: ``(7, 7)``)
The width and height of the canvas in inches.
**kwargs
Any parameters to pass to
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
This method relies heavily on
:meth:`.QueueNetworkDiGraph.draw_graph`. Also, there is a
parameter that sets the background color of the canvas, which
is the ``bgcolor`` parameter.
Examples
--------
To draw the current state of the network, call:
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> net.initialize(100)
>>> net.simulate(1200)
>>> net.draw() # doctest: +SKIP
If you specify a file name and location, the drawing will be
saved to disk. For example, to save the drawing to the current
working directory do the following:
>>> net.draw(fname="state.png", scatter_kwargs={'s': 40}) # doctest: +SKIP
.. figure:: current_state1.png
:align: center
The shade of each edge depicts how many agents are located at
the corresponding queue. The shade of each vertex is determined
by the total number of inbound agents. Although loops are not
visible by default, the vertex that corresponds to a loop shows
how many agents are in that loop.
There are several additional parameters that can be passed --
all :meth:`.QueueNetworkDiGraph.draw_graph` parameters are
valid. For example, to show the edges as dashed lines do the
following.
>>> net.draw(line_kwargs={'linestyle': 'dashed'}) # doctest: +SKIP
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib is necessary to draw the network.")
if update_colors:
self._update_all_colors()
if 'bgcolor' not in kwargs:
kwargs['bgcolor'] = self.colors['bgcolor']
self.g.draw_graph(line_kwargs=line_kwargs,
scatter_kwargs=scatter_kwargs, **kwargs)
def get_agent_data(self, queues=None, edge=None, edge_type=None, return_header=False):
"""Gets data from queues and organizes it by agent.
If none of the parameters are given then data from every
:class:`.QueueServer` is retrieved.
Parameters
----------
queues : int or *array_like* (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be retrieved.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues to retrieve agent data
from. Must be either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types to retrieve agent data from.
return_header : bool (optional, default: False)
Determines whether the column headers are returned.
Returns
-------
dict
Returns a ``dict`` where the keys are the
:class:`Agent's<.Agent>` ``agent_id`` and the values are
:class:`ndarrays<~numpy.ndarray>` for that
:class:`Agent's<.Agent>` data. The columns of this array
are as follows:
* First: The arrival time of an agent.
* Second: The service start time of an agent.
* Third: The departure time of an agent.
* Fourth: The length of the queue upon the agent's arrival.
* Fifth: The total number of :class:`Agents<.Agent>` in the
:class:`.QueueServer`.
* Sixth: the :class:`QueueServer's<.QueueServer>` id
(its edge index).
headers : str (optional)
A comma separated string of the column headers. Returns
``'arrival,service,departure,num_queued,num_total,q_id'``
"""
queues = _get_queues(self.g, queues, edge, edge_type)
data = {}
for qid in queues:
for agent_id, dat in self.edge2queue[qid].data.items():
datum = np.zeros((len(dat), 6))
datum[:, :5] = np.array(dat)
datum[:, 5] = qid
if agent_id in data:
data[agent_id] = np.vstack((data[agent_id], datum))
else:
data[agent_id] = datum
dType = [
('a', float),
('s', float),
('d', float),
('q', float),
('n', float),
('id', float)
]
for agent_id, dat in data.items():
datum = np.array([tuple(d) for d in dat.tolist()], dtype=dType)
datum = np.sort(datum, order='a')
data[agent_id] = np.array([tuple(d) for d in datum])
if return_header:
return data, 'arrival,service,departure,num_queued,num_total,q_id'
return data
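# Illustrative use of the dictionary returned above (assumes 'net' is a QueueNetwork and that
# data collection was switched on with start_collecting_data() before simulating; the column
# layout is documented in the docstring: arrival, service start, departure, num_queued,
# num_total, q_id):
# agent_data = net.get_agent_data()
# for agent_id, datum in agent_data.items():
# first_arrival = datum[0, 0] # rows are sorted by arrival time (column 0)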
def get_queue_data(self, queues=None, edge=None, edge_type=None, return_header=False):
"""Gets data from all the queues.
If none of the parameters are given then data from every
:class:`.QueueServer` is retrieved.
Parameters
----------
queues : int or an *array_like* of int, (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` whose data will
be retrieved.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues to retrieve data from. Must
be either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types to retrieve data from.
return_header : bool (optional, default: False)
Determines whether the column headers are returned.
Returns
-------
out : :class:`~numpy.ndarray`
* 1st: The arrival time of an agent.
* 2nd: The service start time of an agent.
* 3rd: The departure time of an agent.
* 4th: The length of the queue upon the agent's arrival.
* 5th: The total number of :class:`Agents<.Agent>` in the
:class:`.QueueServer`.
* 6th: The :class:`QueueServer's<.QueueServer>` edge index.
out : str (optional)
A comma separated string of the column headers. Returns
``'arrival,service,departure,num_queued,num_total,q_id'``
Examples
--------
Data is not collected by default. Before simulating, be sure to
turn it on (as well as initialize the network). The following
returns data from queues with ``edge_type`` 1 or 3:
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> net.start_collecting_data()
>>> net.initialize(10)
>>> net.simulate(2000)
>>> data = net.get_queue_data(edge_type=(1, 3))
To get data from an edge connecting two vertices do the
following:
>>> data = net.get_queue_data(edge=(1, 50))
To get data from several edges do the following:
>>> data = net.get_queue_data(edge=[(1, 50), (10, 91), (99, 99)])
You can specify the edge indices as well:
>>> data = net.get_queue_data(queues=(20, 14, 0, 4))
"""
queues = _get_queues(self.g, queues, edge, edge_type)
data = np.zeros((0, 6))
for q in queues:
dat = self.edge2queue[q].fetch_data()
if len(dat) > 0:
data = np.vstack((data, dat))
if return_header:
return data, 'arrival,service,departure,num_queued,num_total,q_id'
return data
def initialize(self, nActive=1, queues=None, edges=None, edge_type=None):
"""Prepares the ``QueueNetwork`` for simulation.
Each :class:`.QueueServer` in the network starts inactive,
which means they do not accept arrivals from outside the
network, and they have no agents in their system. This method
sets queues to active, which then allows agents to arrive from
outside the network.
Parameters
----------
nActive : int (optional, default: ``1``)
The number of queues to set as active. The queues are
selected randomly.
queues : int or *array_like* (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` to make active.
edges : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues to make active. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types will be set active.
Raises
------
ValueError
If ``queues``, ``edges``, and ``edge_type`` are all ``None``
and ``nActive`` is an integer less than 1
:exc:`~ValueError` is raised.
TypeError
If ``queues``, ``edges``, and ``edge_type`` are all ``None``
and ``nActive`` is not an integer then a :exc:`~TypeError`
is raised.
QueueingToolError
Raised if all the queues specified are
:class:`NullQueues<.NullQueue>`.
Notes
-----
:class:`NullQueues<.NullQueue>` cannot be activated, and are
sifted out if they are specified. More specifically, every edge
with edge type 0 is sifted out.
"""
if queues is None and edges is None and edge_type is None:
if nActive >= 1 and isinstance(nActive, numbers.Integral):
qs = [q.edge[2] for q in self.edge2queue if q.edge[3] != 0]
n = min(nActive, len(qs))
queues = np.random.choice(qs, size=n, replace=False)
elif not isinstance(nActive, numbers.Integral):
msg = "If queues is None, then nActive must be an integer."
raise TypeError(msg)
else:
msg = ("If queues is None, then nActive must be a "
"positive int.")
raise ValueError(msg)
else:
queues = _get_queues(self.g, queues, edges, edge_type)
queues = [e for e in queues if self.edge2queue[e].edge[3] != 0]
if len(queues) == 0:
raise QueueingToolError("There were no queues to initialize.")
if len(queues) > self.max_agents:
queues = queues[:self.max_agents]
for ei in queues:
self.edge2queue[ei].set_active()
self.num_agents[ei] = self.edge2queue[ei]._num_total
keys = [q._key() for q in self.edge2queue if q._time < np.infty]
self._fancy_heap = PriorityQueue(keys, self.nE)
self._initialized = True
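# Usage sketch (illustrative; 'net' is a QueueNetwork instance and the edge tuples below are
# made up, they must exist in the graph):
# net.initialize(5) # activate 5 randomly chosen non-NullQueue queues
# net.initialize(edge_type=1) # activate every queue sitting on a type 1 edge
# net.initialize(edges=[(0, 1), (2, 3)]) # activate the queues on specific edges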
def next_event_description(self):
"""Returns whether the next event is an arrival or a departure
and the queue the event is occurring at.
Returns
-------
des : str
Indicates whether the next event is an arrival, a
departure, or nothing; returns ``'Arrival'``,
``'Departure'``, or ``'Nothing'``.
edge : int or ``None``
The edge index of the edge that this event will occur at.
If there are no events then ``None`` is returned.
"""
if self._fancy_heap.size == 0:
event_type = 'Nothing'
edge_index = None
else:
s = [q._key() for q in self.edge2queue]
s.sort()
e = s[0][1]
q = self.edge2queue[e]
event_type = 'Arrival' if q.next_event_description() == 1 else 'Departure'
edge_index = q.edge[2]
return event_type, edge_index
def reset_colors(self):
"""Resets all edge and vertex colors to their default values."""
for k, e in enumerate(self.g.edges()):
self.g.set_ep(e, 'edge_color', self.edge2queue[k].colors['edge_color'])
for v in self.g.nodes():
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_fill_color'])
def set_transitions(self, mat):
"""Change the routing transitions probabilities for the
network.
Parameters
----------
mat : dict or :class:`~numpy.ndarray`
A transition routing matrix or transition dictionary. If
passed a dictionary, the keys are source vertex indices and
the values are dictionaries with target vertex indices
as the keys and the probabilities of routing from the
source to the target as the values.
Raises
------
ValueError
A :exc:`.ValueError` is raised if: the keys in the dict
don't match with a vertex index in the graph; or if the
:class:`~numpy.ndarray` is passed with the wrong shape,
which must be (``num_vertices``, ``num_vertices``); or the values
passed are not probabilities (for each vertex they are
positive and sum to 1).
TypeError
A :exc:`.TypeError` is raised if mat is not a dict or
:class:`~numpy.ndarray`.
Examples
--------
The default transition matrix is every out edge being equally
likely:
>>> import queueing_tool as qt
>>> adjacency = {
... 0: [2],
... 1: [2, 3],
... 2: [0, 1, 2, 4],
... 3: [1],
... 4: [2],
... }
>>> g = qt.adjacency2graph(adjacency)
>>> net = qt.QueueNetwork(g)
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 1.0},
1: {2: 0.5, 3: 0.5},
2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},
3: {1: 1.0},
4: {2: 1.0}}
If you want to change only one vertex's transition
probabilities, you can do so with the following:
>>> net.set_transitions({1 : {2: 0.75, 3: 0.25}})
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 1.0},
1: {2: 0.75, 3: 0.25},
2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},
3: {1: 1.0},
4: {2: 1.0}}
One can generate a transition matrix using
:func:`.generate_transition_matrix`. You can change all
transition probabilities with an :class:`~numpy.ndarray`:
>>> mat = qt.generate_transition_matrix(g, seed=10)
>>> net.set_transitions(mat)
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 1.0},
1: {2: 0.962..., 3: 0.037...},
2: {0: 0.301..., 1: 0.353..., 2: 0.235..., 4: 0.108...},
3: {1: 1.0},
4: {2: 1.0}}
See Also
--------
:meth:`.transitions` : Return the current routing
probabilities.
:func:`.generate_transition_matrix` : Generate a random routing
matrix.
"""
if isinstance(mat, dict):
for key, value in mat.items():
probs = list(value.values())
if key not in self.g.node:
msg = "One of the keys don't correspond to a vertex."
raise ValueError(msg)
elif len(self.out_edges[key]) > 0 and not np.isclose(sum(probs), 1):
msg = "Sum of transition probabilities at a vertex was not 1."
raise ValueError(msg)
elif (np.array(probs) < 0).any():
msg = "Some transition probabilities were negative."
raise ValueError(msg)
for k, e in enumerate(sorted(self.g.out_edges(key))):
self._route_probs[key][k] = value.get(e[1], 0)
elif isinstance(mat, np.ndarray):
non_terminal = np.array([self.g.out_degree(v) > 0 for v in self.g.nodes()])
if mat.shape != (self.nV, self.nV):
msg = ("Matrix is the wrong shape, should "
"be {0} x {1}.").format(self.nV, self.nV)
raise ValueError(msg)
elif not np.allclose(np.sum(mat[non_terminal, :], axis=1), 1):
msg = "Sum of transition probabilities at a vertex was not 1."
raise ValueError(msg)
elif (mat < 0).any():
raise ValueError("Some transition probabilities were negative.")
for k in range(self.nV):
for j, e in enumerate(sorted(self.g.out_edges(k))):
self._route_probs[k][j] = mat[k, e[1]]
else:
raise TypeError("mat must be a numpy array or a dict.")
def show_active(self, **kwargs):
"""Draws the network, highlighting active queues.
The colored vertices represent vertices that have at least one
queue on an in-edge that is active. Dark edges represent
queues that are active, light edges represent queues that are
inactive.
Parameters
----------
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
Active queues are :class:`QueueServers<.QueueServer>` that
accept arrivals from outside the network. The colors are
defined by the class attribute ``colors``. The relevant keys
are ``vertex_active``, ``vertex_inactive``, ``edge_active``,
and ``edge_inactive``.
"""
g = self.g
for v in g.nodes():
self.g.set_vp(v, 'vertex_color', [0, 0, 0, 0.9])
is_active = False
my_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v)
for e in my_iter:
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
is_active = True
break
if is_active:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_active'])
else:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_inactive'])
for e in g.edges():
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
self.g.set_ep(e, 'edge_color', self.colors['edge_active'])
else:
self.g.set_ep(e, 'edge_color', self.colors['edge_inactive'])
self.draw(update_colors=False, **kwargs)
self._update_all_colors()
def show_type(self, edge_type, **kwargs):
"""Draws the network, highlighting queues of a certain type.
The colored vertices represent self loops of type ``edge_type``.
Dark edges represent queues of type ``edge_type``.
Parameters
----------
edge_type : int
The type of vertices and edges to be shown.
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`
Notes
-----
The colors are defined by the class attribute ``colors``. The
relevant colors are ``vertex_active``, ``vertex_inactive``,
``vertex_highlight``, ``edge_active``, and ``edge_inactive``.
Examples
--------
The following code highlights all edges with edge type ``2``.
If the edge is a loop then the vertex is highlighted as well.
In this case all edges with edge type ``2`` happen to be loops.
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> fname = 'edge_type_2.png'
>>> net.show_type(2, fname=fname) # doctest: +SKIP
.. figure:: edge_type_2-1.png
:align: center
"""
for v in self.g.nodes():
e = (v, v)
if self.g.is_edge(e) and self.g.ep(e, 'edge_type') == edge_type:
ei = self.g.edge_index[e]
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_highlight'])
self.g.set_vp(v, 'vertex_color', self.edge2queue[ei].colors['vertex_color'])
else:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_inactive'])
self.g.set_vp(v, 'vertex_color', [0, 0, 0, 0.9])
for e in self.g.edges():
if self.g.ep(e, 'edge_type') == edge_type:
self.g.set_ep(e, 'edge_color', self.colors['edge_active'])
else:
self.g.set_ep(e, 'edge_color', self.colors['edge_inactive'])
self.draw(update_colors=False, **kwargs)
self._update_all_colors()
def simulate(self, n=1, t=None):
"""Simulates the network forward.
Simulates either a specific number of events or for a specified
amount of simulation time.
Parameters
----------
n : int (optional, default: 1)
The number of events to simulate. If ``t`` is not given
then this parameter is used.
t : float (optional)
The amount of simulation time to simulate forward. If
given, ``t`` is used instead of ``n``.
Raises
------
QueueingToolError
Will raise a :exc:`.QueueingToolError` if the
``QueueNetwork`` has not been initialized. Call
:meth:`.initialize` before calling this method.
Examples
--------
Let ``net`` denote your instance of a ``QueueNetwork``. Before
you simulate, you need to initialize the network, which allows
arrivals from outside the network. To initialize with 2 (randomly
chosen) edges accepting arrivals run:
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=50)
>>> net = qt.QueueNetwork(g, seed=50)
>>> net.initialize(2)
To simulate the network for 50000 events run:
>>> net.num_events
0
>>> net.simulate(50000)
>>> net.num_events
50000
To simulate the network for at least 75 simulation time units
run:
>>> t0 = net.current_time
>>> net.simulate(t=75)
>>> t1 = net.current_time
>>> t1 - t0 # doctest: +ELLIPSIS
75...
"""
if not self._initialized:
msg = ("Network has not been initialized. "
"Call '.initialize()' first.")
raise QueueingToolError(msg)
if t is None:
for dummy in range(n):
self._simulate_next_event(slow=False)
else:
now = self._t
while self._t < now + t:
self._simulate_next_event(slow=False)
def _simulate_next_event(self, slow=True):
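# Pop the queue with the earliest scheduled event off the heap and
# advance the simulation clock to that time. A departure (event == 2)
# routes the agent to its destination queue, handling blocking and the
# ``max_agents`` cap; an arrival (event == 1) only triggers that
# queue's own next event. Updated (time, edge index) keys are then
# pushed back onto the heap for the next call.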
if self._fancy_heap.size == 0:
self._t = np.infty
return
q1k = self._fancy_heap.pop()
q1t = q1k[0]
q1 = self.edge2queue[q1k[1]]
e1 = q1.edge[2]
event = q1.next_event_description()
self._t = q1t
self._qkey = q1k
self.num_events += 1
if event == 2: # This is a departure
e2 = q1._departures[0].desired_destination(self, q1.edge)
q2 = self.edge2queue[e2]
q2k = q2._key()
if q2.at_capacity() and e2 != e1:
q2.num_blocked += 1
q1._departures[0].blocked += 1
if self._blocking:
t = q2._departures[0]._time + EPS * uniform(0.33, 0.66)
q1.delay_service(t)
else:
q1.delay_service()
else:
agent = q1.next_event()
agent._time = q1t
q2._add_arrival(agent)
self.num_agents[e1] = q1._num_total
self.num_agents[e2] = q2._num_total
if slow:
self._update_graph_colors(qedge=q1.edge)
self._prev_edge = q1.edge
if q2._active and self.max_agents < np.infty and \
np.sum(self.num_agents) > self.max_agents - 1:
q2._active = False
q2.next_event()
self.num_agents[e2] = q2._num_total
if slow:
self._update_graph_colors(qedge=q2.edge)
self._prev_edge = q2.edge
new_q1k = q1._key()
new_q2k = q2._key()
if new_q2k[0] != q2k[0]:
self._fancy_heap.push(*new_q2k)
if new_q1k[0] < np.infty and new_q1k != new_q2k:
self._fancy_heap.push(*new_q1k)
else:
if new_q1k[0] < np.infty:
self._fancy_heap.push(*new_q1k)
elif event == 1: # This is an arrival
if q1._active and self.max_agents < np.infty and \
np.sum(self.num_agents) > self.max_agents - 1:
q1._active = False
q1.next_event()
self.num_agents[e1] = q1._num_total
if slow:
self._update_graph_colors(qedge=q1.edge)
self._prev_edge = q1.edge
new_q1k = q1._key()
if new_q1k[0] < np.infty:
self._fancy_heap.push(*new_q1k)
def start_collecting_data(self, queues=None, edge=None, edge_type=None):
"""Tells the queues to collect data on agents' arrival, service
start, and departure times.
If none of the parameters are given then every
:class:`.QueueServer` will start collecting data.
Parameters
----------
queues : :any:`int`, *array_like* (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` that will start
collecting data.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues will collect data. Must be
either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types will start collecting data.
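Examples
--------
An illustrative snippet that turns on data collection for every queue
whose edge type is ``1``:
>>> import queueing_tool as qt
>>> g = qt.generate_pagerank_graph(100, seed=13)
>>> net = qt.QueueNetwork(g, seed=13)
>>> net.start_collecting_data(edge_type=1)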
"""
queues = _get_queues(self.g, queues, edge, edge_type)
for k in queues:
self.edge2queue[k].collect_data = True
def stop_collecting_data(self, queues=None, edge=None, edge_type=None):
"""Tells the queues to stop collecting data on agents.
If none of the parameters are given then every
:class:`.QueueServer` will stop collecting data.
Parameters
----------
queues : int, *array_like* (optional)
The edge index (or an iterable of edge indices) identifying
the :class:`QueueServer(s)<.QueueServer>` that will stop
collecting data.
edge : 2-tuple of int or *array_like* (optional)
Explicitly specify which queues will stop collecting data.
Must be either:
* A 2-tuple of the edge's source and target vertex
indices, or
* An iterable of 2-tuples of the edge's source and
target vertex indices.
edge_type : int or an iterable of int (optional)
An integer, or a collection of integers identifying which
edge types will stop collecting data.
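Examples
--------
An illustrative counterpart to :meth:`.start_collecting_data`, turning
data collection back off for type ``1`` queues:
>>> net.stop_collecting_data(edge_type=1)  # doctest: +SKIP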
"""
queues = _get_queues(self.g, queues, edge, edge_type)
for k in queues:
self.edge2queue[k].collect_data = False
def transitions(self, return_matrix=True):
"""Returns the routing probabilities for each vertex in the
graph.
Parameters
----------
return_matrix : bool (optional, the default is ``True``)
Specifies whether an :class:`~numpy.ndarray` is returned.
If ``False``, a dict is returned instead.
Returns
-------
out : a dict or :class:`~numpy.ndarray`
The transition probabilities for each vertex in the graph.
If ``out`` is an :class:`~numpy.ndarray`, then
``out[v, u]`` returns the probability of a transition from
vertex ``v`` to vertex ``u``. If ``out`` is a dict
then ``out[v][u]`` is the probability of moving from
vertex ``v`` to vertex ``u``.
Examples
--------
Let's change the routing probabilities:
>>> import queueing_tool as qt
>>> import networkx as nx
>>> g = nx.sedgewick_maze_graph()
>>> net = qt.QueueNetwork(g)
Below is an adjacency list for the graph ``g``.
>>> ans = qt.graph2dict(g, False)
>>> {k: sorted(v) for k, v in ans.items()}
... # doctest: +NORMALIZE_WHITESPACE
{0: [2, 5, 7],
1: [7],
2: [0, 6],
3: [4, 5],
4: [3, 5, 6, 7],
5: [0, 3, 4],
6: [2, 4],
7: [0, 1, 4]}
The default transition probabilities make every out edge equally
likely:
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.333..., 5: 0.333..., 7: 0.333...},
1: {7: 1.0},
2: {0: 0.5, 6: 0.5},
3: {4: 0.5, 5: 0.5},
4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},
5: {0: 0.333..., 3: 0.333..., 4: 0.333...},
6: {2: 0.5, 4: 0.5},
7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}
Now we will generate a random routing matrix:
>>> mat = qt.generate_transition_matrix(g, seed=96)
>>> net.set_transitions(mat)
>>> net.transitions(False) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
{0: {2: 0.112..., 5: 0.466..., 7: 0.420...},
1: {7: 1.0},
2: {0: 0.561..., 6: 0.438...},
3: {4: 0.545..., 5: 0.454...},
4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},
5: {0: 0.265..., 3: 0.460..., 4: 0.274...},
6: {2: 0.673..., 4: 0.326...},
7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}
This shows that when an :class:`.Agent` is at vertex ``2`` it will
transition to vertex ``0`` with probability ``0.561`` and to vertex
``6`` with probability ``0.438``; when at vertex ``6`` it will
transition back to vertex ``2`` with probability ``0.673`` and to
vertex ``4`` with probability ``0.326``, and so on.
"""
if return_matrix:
mat = np.zeros((self.nV, self.nV))
for v in self.g.nodes():
ind = [e[1] for e in sorted(self.g.out_edges(v))]
mat[v, ind] = self._route_probs[v]
else:
mat = {
k: {e[1]: p for e, p in zip(sorted(self.g.out_edges(k)), value)}
for k, value in enumerate(self._route_probs)
}
return mat
def _update_all_colors(self):
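# Recolor every edge from its queue's current color. Self loops also
# recolor their vertex directly; for ordinary edges each endpoint's
# fill color is recomputed once (the ``do`` list keeps a vertex from
# being updated more than once).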
do = [True for v in range(self.nV)]
for q in self.edge2queue:
e = q.edge[:2]
v = q.edge[1]
if q.edge[0] == q.edge[1]:
self.g.set_ep(e, 'edge_color', q._current_color(1))
self.g.set_vp(v, 'vertex_color', q._current_color(2))
if q.edge[3] != 0:
self.g.set_vp(v, 'vertex_fill_color', q._current_color())
do[v] = False
else:
self.g.set_ep(e, 'edge_color', q._current_color())
if do[v]:
self._update_vertex_color(v)
do[v] = False
if do[q.edge[0]]:
self._update_vertex_color(q.edge[0])
do[q.edge[0]] = False
def _update_vertex_color(self, v):
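# Shade the vertex fill color by how full its incoming queues are: the
# number of agents in the system is compared against twice the total
# number of servers (plus two) and the base fill color is scaled by
# the result. Vertices whose self loop has a nonzero edge type keep
# the color set by that loop's queue.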
ee = (v, v)
ee_is_edge = self.g.is_edge(ee)
eei = self.g.edge_index[ee] if ee_is_edge else 0
if not ee_is_edge or (ee_is_edge and self.edge2queue[eei].edge[3] == 0):
nSy = 0
cap = 0
for ei in self.in_edges[v]:
nSy += self.edge2queue[ei].num_system
cap += self.edge2queue[ei].num_servers
div = (2 * cap) + 2.
tmp = 1. - min(nSy / div, 1.)
color = [i * tmp for i in self.colors['vertex_fill_color']]
color[3] = 1.0
self.g.set_vp(v, 'vertex_fill_color', color)
if not ee_is_edge:
self.g.set_vp(v, 'vertex_color', self.colors['vertex_color'])
def _update_graph_colors(self, qedge):
e = qedge[:2]
v = qedge[1]
if self._prev_edge is not None:
pe = self._prev_edge[:2]
pv = self._prev_edge[1]
q = self.edge2queue[self._prev_edge[2]]
if pe[0] == pe[1]:
self.g.set_ep(pe, 'edge_color', q._current_color(1))
self.g.set_vp(pv, 'vertex_color', q._current_color(2))
if q.edge[3] != 0:
self.g.set_vp(pv, 'vertex_fill_color', q._current_color())
else:
self.g.set_ep(pe, 'edge_color', q._current_color())
self._update_vertex_color(pv)
q = self.edge2queue[qedge[2]]
if qedge[0] == qedge[1]:
self.g.set_ep(e, 'edge_color', q._current_color(1))
self.g.set_vp(v, 'vertex_color', q._current_color(2))
if q.edge[3] != 0:
self.g.set_vp(v, 'vertex_fill_color', q._current_color())
else:
self.g.set_ep(e, 'edge_color', q._current_color())
self._update_vertex_color(v)
def _get_queues(g, queues, edge, edge_type):
"""Used to specify edge indices from different types of arguments."""
INT = numbers.Integral
if isinstance(queues, INT):
queues = [queues]
elif queues is None:
if edge is not None:
if isinstance(edge, tuple):
if isinstance(edge[0], INT) and isinstance(edge[1], INT):
queues = [g.edge_index[edge]]
elif isinstance(edge[0], collections.Iterable):
if np.array([len(e) == 2 for e in edge]).all():
queues = [g.edge_index[e] for e in edge]
else:
queues = [g.edge_index[edge]]
elif edge_type is not None:
if isinstance(edge_type, collections.Iterable):
edge_type = set(edge_type)
else:
edge_type = set([edge_type])
tmp = []
for e in g.edges():
if g.ep(e, 'edge_type') in edge_type:
tmp.append(g.edge_index[e])
queues = np.array(tmp, int)
if queues is None:
queues = range(g.number_of_edges())
return queues
|
mit
|
ArnaudCassan/microlensing
|
microlensing/interferometry/mcmc.py
|
1
|
12518
|
# Copyright (c) Arnaud Cassan.
# Distributed under the terms of the MIT license.
import os
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import emcee
import matplotlib.pylab as plt
from microlensing.utils import checkandtimeit, verbosity, printi, printd, printw
from multiprocessing import Pool
from microlensing.interferometry.ESPL import mod_VIS
from microlensing.interferometry.data import obs_VIS2
class InterferolensingModels():
"""List of interferometric microlensing models
Parameters
----------
model : string
Model name.
Attributes
----------
model : string
Model name.
ndim : int
Number of parameters.
params : dict
Dictionary of parameters (name and value, default values are None).
pnames : string
Print format for parameter names.
lxnames : list
Parameter names in LaTeX format.
Usage
-----
>>> ilm = InterferolensingModels(model)
>>> ilm.pnames
>>> ilm.params['thetaE']
>>> ilm.ndim
>>> ilm.lxnames
"""
def __init__(self, model):
self.model = model
# ESPL - single epoch, contouring
if self.model == 'ESPL_CONT_SINGEP':
self.ndim = 4
self.params = {'alpha1':None, 'thetaE':None, 'eta1':None, 'u1':None}
self.lxnames = [r'$\alpha_1$', r'$\theta_E$', r'$\eta_1$', r'$u_1$']
# ESPL - single epoch, full integration
elif self.model == 'ESPL_FULLINT_SINGEP':
self.ndim = 4
self.params = {'alpha1':None, 'thetaE':None, 'eta1':None, 'u1':None}
self.lxnames = [r'$\alpha_1$', r'$\theta_E$', r'$\eta_1$', r'$u_1$']
# ESPL - single epoch, point-source approximation
elif self.model == 'ESPL_PS_SINGEP':
self.ndim = 3
self.params = {'alpha1':None, 'thetaE':None, 'u1':None}
self.lxnames = [r'$\alpha_1$', r'$\theta_E$', r'$u_1$']
# ESPL - single epoch, flat-images approximation
elif self.model == 'ESPL_FLAT_SINGEP':
self.ndim = 3
self.params = {'alpha1':None, 'thetaE':None, 'eta1':None}
self.lxnames = [r'$\alpha_1$', r'$\theta_E$', r'$\eta_1$']
# ESPL - multi-epochs, contouring
elif self.model == 'ESPL_CONT_MULTIEP':
self.ndim = 6
self.params = {'alpha':None, 'thetaE':None, 'eta0':None, 't*':None, 't0':None, 'u0':None}
self.lxnames = [r'$\alpha$', r'$\theta_E$', r'$\eta_0$', r'$t_*$', r'$t_0$', r'$u_0$']
# ESPL - multi-epochs, full integration
elif self.model == 'ESPL_FULLINT_MULTIEP':
self.ndim = 6
self.params = {'alpha':None, 'thetaE':None, 'eta0':None, 't*':None, 't0':None, 'u0':None}
self.lxnames = [r'$\alpha$', r'$\theta_E$', r'$\eta_0$', r'$t_*$', r'$t_0$', r'$u_0$']
# ESPL - multi-epochs, full integration, microlensing parameters
elif self.model == 'ESPL_MLENSING_MULTIEP':
self.ndim = 6
self.params = {'alpha':None, 'thetaE':None, 'rho':None, 'tE':None, 't0':None, 'u0':None}
self.lxnames = [r'$\alpha$', r'$\theta_E$', r'$\rho$', r'$t_E$', r'$t_0$', r'$u_0$']
# ESPL - multi-epochs, point-source approximation
elif self.model == 'ESPL_PS_MULTIEP':
self.ndim = 5
self.params = {'alpha':None, 'thetaE':None, 'tE':None, 't0':None, 'u0':None}
self.lxnames = [r'$\alpha$', r'$\theta_E$', r'$t_E$', r'$t_0$', r'$u_0$']
# ESPL - multi-epochs, flat-images approximation
elif self.model == 'ESPL_FLAT_MULTIEP':
self.ndim = 5
self.params = {'alpha':None, 'thetaE':None, 'eta0':None, 't*':None, 't0':None}
self.lxnames = [r'$\alpha$', r'$\theta_E$', r'$\eta_0$', r'$t_*$', r'$t_0$']
else:
raise ValueError("Wrong model ID ({}) in InterferolensingModels".format(model))
# print format for parameter names
self.pnames = '(' + ', '.join([p for p in self.params]) + ')'
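# Illustrative usage (the values follow from the definitions above):
#   >>> ilm = InterferolensingModels('ESPL_PS_SINGEP')
#   >>> ilm.ndim
#   3
#   >>> ilm.pnames
#   '(alpha1, thetaE, u1)'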
def fit_emcee(oifitslist, model, params, priorfun, samplerfile, nwalkers, chainlen, ncpu, Nc=20000, LLD=0., Na=8, tol=1e-5, ref_MJD=0., ifconverged=False, resume=False):
"""Compute MCMC chains using EMCEE-3
Parameters
----------
oifitslist : list of strings
List of input OIFITS files
params : list
Contains 2-element lists [[pi_low, pi_high], ...] of parameter bounds;
these intervals are used to draw the walkers' initial positions
Nc : int
Common source contour initial sampling (global Nc_init)
and final sampling (global Nc_fin)
samplerfile : string
Name of file where the chains are stored (HDF5 format)
nwalkers : int
Number of chains in parallel
chainlen : int
Individual chain length for one MCMC run
ncpu : int
Number of chains computed in parallel (threads)
ref_MJD : float
Reference MJD
ifconverged : boolean
Stop automatically once the chains have converged
resume : boolean
Resume previous run
hdulists (global) : list
Contains a list [u, v, VIS2, VIS2ERR, MJD] for each (ep, B, lbd)
Outputs
-------
samplerfile : string
Name of file where the chains are stored (HDF5 format)
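Examples
--------
An illustrative call (the OIFITS file name, parameter bounds and run
lengths below are placeholders, not values from an actual fit); the
flat prior accepts every sample:
>>> bounds = [[0., 360.], [0., 10.], [0., 1.]]  # alpha1, thetaE, u1
>>> flat_prior = lambda x: 0.
>>> fit_emcee(['epoch1.oifits'], 'ESPL_PS_SINGEP', bounds, flat_prior,
...           'chains.h5', nwalkers=32, chainlen=1000, ncpu=4)  # doctest: +SKIP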
"""
# set I/O shell display
tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
# get model
ilm = InterferolensingModels(model)
ndim = ilm.ndim
mnames = ilm.pnames
printi(tcol + "Chosen model : " + tend + "{0}, {1}".format(model, mnames))
# setup
initial_length = chainlen
resume_length = chainlen
# read OIFITS files and create data fit-lists
global hdulists
hdulists = []
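# Each file is read baseline by baseline and wavelength channel by
# channel; the fixed 6 x 6 grid below presumably matches the six
# baselines and six spectral channels of the instrument used.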
for oifits in oifitslist:
for b in range(6):
for l in range(6):
hdulists = hdulists + obs_VIS2(oifits, b + 1, l + 1, ref_MJD=ref_MJD)
# initialize run
if not resume:
pos = []
# Initialize the walkers
for p in params:
pos.append(np.random.uniform(p[0], p[1], nwalkers))
pos = np.array(pos).T
# Set up the backend
backend = emcee.backends.HDFBackend(samplerfile)
backend.reset(nwalkers, ndim)
# Initialize the sampler
with Pool(processes=ncpu) as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprob, args=[priorfun], kwargs={'model':model, 'Nc':Nc, 'LLD':LLD ,'Na':Na, 'tol':tol}, backend=backend, pool=pool)
# Compute average changes of autocorrelation time
index = 0
autocorr = np.empty(initial_length)
# Initialize the convergence tracking variable
taup = np.inf
# Now we'll sample for up to initial_length steps
for sample in sampler.sample(pos, iterations=initial_length, progress=True):
if ifconverged:
# Convergence test every 300 steps
if sampler.iteration % 300: continue
# Force computing autocorrelation
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau)
index = index + 1
# Converged when the chain exceeds 100 autocorrelation times and tau is stable to within 1%
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(taup - tau) / tau < 0.01)
if converged: break
taup = tau
# resume / continue run
else:
backend = emcee.backends.HDFBackend(samplerfile)
printi(tcol + "Initial size :" + tend + " {0}".format(backend.iteration))
with Pool(processes=ncpu) as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprob, args=[priorfun], kwargs={'model':model, 'Nc':Nc, 'LLD':LLD ,'Na':Na, 'tol':tol}, backend=backend, pool=pool)
sampler.run_mcmc(None, resume_length, progress=True)
printi(tcol + "Final size :" + tend + " {0}".format(backend.iteration))
def logprob(x, priorfun, model=None, Nc=None, LLD=None, Na=None, tol=None):
"""Compute gobal chi2 for a set epochs, baselines and wavelenghts,
and fit for (alpha [deg], rho, thetaE [mas], u0, tE, t0) for time series
or (alpha [deg], rho, thetaE [mas], u0) for a single epoch
Parameters
----------
hdulists (global) : list
Contains a list [u, v, VIS2, VIS2ERR, MJD] for each (ep, B, lbd)
x : list
Model parameters (alpha, rho, thetaE, u0, tE, t0) for time series
or (alpha, rho, thetaE, u0) for a single epoch
Returns
-------
logp : float
log(posterior) = - chi^2 / 2 + log(prior)
"""
# set I/O shell display
tcol, tun, tend, tit = "\033[0m\033[34m", "\033[0m\033[1m\033[34m", "\033[0m", "\033[0m\033[3m"
# conversion mas/deg
mas = np.pi / 180. / 3600. / 1000.
# sum chi2 over all epochs, baselines and wavelengths
chi2 = 0.
for hdulist in hdulists:
# get individual data
u, v, VIS2, VIS2ERR, MJD = hdulist
# conversion mas -> rad (thetaE is always x[1])
u = u * mas * x[1]
v = v * mas * x[1]
# single epoch, contouring or full integration (alpha1, thetaE, eta1, u1)
if model == 'ESPL_CONT_SINGEP' or model == 'ESPL_FULLINT_SINGEP':
zetac = complex(0., x[3]) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = np.abs(mod_VIS(zetac, x[2] * x[3], u, v, model, Nc=Nc, LLD=LLD, Na=Na, tol=tol))**2
# single epoch, point-source approximation (alpha1, thetaE, u1)
elif model == 'ESPL_PS_SINGEP':
zetac = complex(0., x[2]) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = mod_VIS(zetac, 0, u, v, model)**2
# single epoch, flat-images approximation (alpha1, thetaE, eta1)
elif model == 'ESPL_FLAT_SINGEP':
zetac = complex(0., 1.) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = mod_VIS(zetac, x[2], u, v, model, LLD=LLD, Na=Na, tol=tol)**2
# multi-epochs, contouring or full integration (alpha, thetaE, eta0, t*, t0, u0)
elif model == 'ESPL_CONT_MULTIEP' or model == 'ESPL_FULLINT_MULTIEP':
zetac = complex((MJD - x[4]) * x[2] * x[5] / x[3], x[5]) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = np.abs(mod_VIS(zetac, x[2] * x[5], u, v, model, Nc=Nc, LLD=LLD, Na=Na, tol=tol))**2
# multi-epochs, full integration, microlensing parameters (alpha, thetaE, rho, tE, t0, u0)
elif model == 'ESPL_MLENSING_MULTIEP':
zetac = complex((MJD - x[4]) / x[3], x[5]) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = np.abs(mod_VIS(zetac, x[2], u, v, model, Nc=Nc, LLD=LLD, Na=Na, tol=tol))**2
# multi-epochs, point-source approximation (alpha, thetaE, tE, t0, u0)
elif model == 'ESPL_PS_MULTIEP':
zetac = complex((MJD - x[3]) / x[2], x[4]) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = mod_VIS(zetac, 0, u, v, model)**2
# multi-epochs, flat-images approximation (alpha, thetaE, eta0, t*, t0)
elif model == 'ESPL_FLAT_MULTIEP':
zetac = complex((MJD - x[4]) * x[2] / x[3], 1.) * np.exp(complex(0., np.pi / 180. * x[0]))
VISE2 = mod_VIS(zetac, x[2], u, v, model, LLD=LLD, Na=Na, tol=tol)**2
else:
raise ValueError("Wrong model ID ({}) in logprob".format(model))
# add chi2/pt
chi2 += (VIS2 - VISE2)**2 / VIS2ERR**2
# log probability
logp = -0.5 * chi2 + priorfun(x)
# print current values of parameters and chi2
printd(tcol + "Parameters | chi2 | log(P): " + tend + "{0} | {1} | {2}".format(x, chi2, logp))
# return log(prob)
return logp
|
mit
|